GNU Linux-libre 4.14.251-gnu1
drivers/scsi/qla2xxx/qla_iocb.c
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI Request Block for the command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        struct scsi_qla_host *vha = sp->vha;

        cflags = 0;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }
        return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}

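/*
 * Worked example (illustrative, not part of the driver): the command
 * IOCB holds the first few DSDs and each continuation IOCB holds the
 * rest, so the entry count is 1 + ceil((dsds - base) / per_cont):
 *
 *      qla2x00_calc_iocbs_32(17) == 1 + 14 / 7 == 3
 *      qla2x00_calc_iocbs_64(17) == 1 + 15 / 5 == 4
 *      qla2x00_calc_iocbs_64(12) == 1 + 10 / 5 == 3
 */
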
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
        cont_entry_t *cont_pkt;
        struct req_que *req = vha->req;
        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
            cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
            cpu_to_le32(CONTINUE_A64_TYPE);

        return (cont_pkt);
}

inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        uint8_t guard = scsi_host_get_guard(cmd->device->host);

        /* We always use DIFF Bundling for best performance */
        *fw_prot_opts = 0;

        /* Translate SCSI opcode to a protection opcode */
        switch (scsi_get_prot_op(cmd)) {
        case SCSI_PROT_READ_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_WRITE_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_READ_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_WRITE_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                if (guard & SHOST_DIX_GUARD_IP)
                        *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
                else
                        *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        default:        /* Normal Request */
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        }

        return scsi_prot_sg_count(cmd);
}

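/*
 * Usage sketch (illustrative only): a DIF-aware submission path, such
 * as qla24xx_dif_start_scsi() below, translates the SCSI protection op
 * before building the CRC_2 IOCB:
 *
 *      uint16_t fw_prot_opts = 0;
 *      uint16_t nprot_sg = 0;
 *
 *      if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL)
 *              nprot_sg = qla24xx_configure_prot_mode(sp, &fw_prot_opts);
 *
 * The return value is the number of protection scatter/gather entries
 * that still have to be DMA-mapped and walked.
 */
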
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            cpu_to_le32(COMMAND_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = 3;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
                        cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
                        avail_dsds = 7;
                }

                *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 3 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = 2;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int             nseg;
        unsigned long   flags;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        cmd_entry_t     *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        struct qla_hw_data *ha;
        struct req_que *req;
        struct rsp_que *rsp;

        /* Setup device pointers. */
        vha = sp->vha;
        ha = vha->hw;
        reg = &ha->iobase->isp;
        cmd = GET_CMD_SP(sp);
        req = ha->req_q_map[0];
        rsp = ha->rsp_q_map[0];
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS) {
                        return (QLA_FUNCTION_FAILED);
                }
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == req->num_outstanding_cmds)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
                /* If still no head room then bail out */
                if (req->cnt < (req_cnt + 2))
                        goto queuing_error;
        }

        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)req->ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number*/
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
        cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}

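/*
 * Worked example for the ring-room check above (illustrative): with a
 * 128-entry ring, ring_index == 120 and hardware out-pointer cnt == 10,
 * the producer has wrapped past the consumer, so the free space is
 * length - (ring_index - cnt) == 128 - 110 == 18 entries; if instead
 * cnt == 125, ring_index < cnt and the free space is simply
 * cnt - ring_index == 5. The "req_cnt + 2" slack keeps the ring from
 * ever being filled completely.
 */
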
/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t *reg = ISP_QUE_REG(ha, req->id);

        if (IS_P3P_TYPE(ha)) {
                qla82xx_start_iocbs(vha);
        } else {
                /* Adjust ring index. */
                req->ring_index++;
                if (req->ring_index == req->length) {
                        req->ring_index = 0;
                        req->ring_ptr = req->ring;
                } else
                        req->ring_ptr++;

                /* Set chip new ring index. */
                if (ha->mqenable || IS_QLA27XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                } else if (IS_QLA83XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
                } else if (IS_QLAFX00(ha)) {
                        WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
                        QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
                } else if (IS_FWI2_CAPABLE(ha)) {
                        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
                } else {
                        WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                                req->ring_index);
                        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
                }
        }
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                        struct rsp_que *rsp, uint16_t loop_id,
                        uint64_t lun, uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24 = NULL;

        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        req = ha->req_q_map[0];
        mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
        if (mrk == NULL) {
                ql_log(ql_log_warn, base_vha, 0x3026,
                    "Failed to allocate Marker IOCB.\n");

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                        mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16((uint16_t)lun);
                }
        }
        wmb();

        qla2x00_start_iocbs(vha, req);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
                uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

        return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
        if (ha_locked) {
                if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        } else {
                if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        }
        vha->marker_needed = 0;

        return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
        uint16_t tot_dsds)
{
        uint32_t *cur_dsd = NULL;
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha;
        struct scsi_cmnd *cmd;
        struct scatterlist *cur_seg;
        uint32_t *dsd_seg;
        void *next_dsd;
        uint8_t avail_dsds;
        uint8_t first_iocb = 1;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct ct6_dsd *ctx;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 6 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return 0;
        }

        vha = sp->vha;
        ha = vha->hw;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        cur_seg = scsi_sglist(cmd);
        ctx = GET_CMD_CTX_SP(sp);

        while (tot_dsds) {
                avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
                    QLA_DSDS_PER_IOCB : tot_dsds;
                tot_dsds -= avail_dsds;
                dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

                dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
                    struct dsd_dma, list);
                next_dsd = dsd_ptr->dsd_addr;
                list_del(&dsd_ptr->list);
                ha->gbl_dsd_avail--;
                list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
                ctx->dsd_use_cnt++;
                ha->gbl_dsd_inuse++;

                if (first_iocb) {
                        first_iocb = 0;
                        dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
                        *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
                } else {
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(dsd_list_len);
                }
                cur_dsd = (uint32_t *)next_dsd;
                while (avail_dsds) {
                        dma_addr_t      sle_dma;

                        sle_dma = sg_dma_address(cur_seg);
                        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
                        cur_seg = sg_next(cur_seg);
                        avail_dsds--;
                }
        }

        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
        return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
        uint16_t dsd_lists = 0;

        dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
        if (dsds % QLA_DSDS_PER_IOCB)
                dsd_lists++;
        return dsd_lists;
}

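/*
 * Worked example (illustrative): each DSD list holds QLA_DSDS_PER_IOCB
 * entries, so a command needing 80 data segment descriptors takes
 * 80 / QLA_DSDS_PER_IOCB full lists plus one for the remainder, e.g.
 * qla24xx_calc_dsd_lists(80) == 3 with QLA_DSDS_PER_IOCB == 37.
 */
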
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
        uint16_t tot_dsds, struct req_que *req)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 7 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

struct fw_dif_context {
        uint32_t ref_tag;
        uint16_t app_tag;
        uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask*/
        uint8_t app_tag_mask[2];        /* Validation/Replacement Mask*/
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command.
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);

        switch (scsi_get_prot_type(cmd)) {
        case SCSI_PROT_DIF_TYPE0:
                /*
                 * No check for ql2xenablehba_err_chk, as it would be an
                 * I/O error if hba tag generation is not done.
                 */
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /*
         * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
         * match LBA in CDB + N
         */
        case SCSI_PROT_DIF_TYPE2:
                pkt->app_tag = cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /* For Type 3 protection: 16 bit GUARD only */
        case SCSI_PROT_DIF_TYPE3:
                pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
                        pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
                                                                0x00;
                break;

        /*
         * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
         * 16 bit app tag.
         */
        case SCSI_PROT_DIF_TYPE1:
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));
                pkt->app_tag = cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;
        }
}

int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
        uint32_t *partial)
{
        struct scatterlist *sg;
        uint32_t cumulative_partial, sg_len;
        dma_addr_t sg_dma_addr;

        if (sgx->num_bytes == sgx->tot_bytes)
                return 0;

        sg = sgx->cur_sg;
        cumulative_partial = sgx->tot_partial;

        sg_dma_addr = sg_dma_address(sg);
        sg_len = sg_dma_len(sg);

        sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

        if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
                sgx->dma_len = (blk_sz - cumulative_partial);
                sgx->tot_partial = 0;
                sgx->num_bytes += blk_sz;
                *partial = 0;
        } else {
                sgx->dma_len = sg_len - sgx->bytes_consumed;
                sgx->tot_partial += sgx->dma_len;
                *partial = 1;
        }

        sgx->bytes_consumed += sgx->dma_len;

        if (sg_len == sgx->bytes_consumed) {
                sg = sg_next(sg);
                sgx->num_sg++;
                sgx->cur_sg = sg;
                sgx->bytes_consumed = 0;
        }

        return 1;
}

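/*
 * Consumption sketch (illustrative, mirrors the caller below): the
 * helper re-slices an arbitrary data scatterlist into protection-
 * interval-sized pieces, so a caller loops until it returns 0:
 *
 *      struct qla2_sgx sgx;
 *      uint32_t partial;
 *
 *      memset(&sgx, 0, sizeof(sgx));
 *      sgx.tot_bytes = scsi_bufflen(cmd);
 *      sgx.cur_sg = scsi_sglist(cmd);
 *
 *      while (qla24xx_get_one_block_sg(blk_sz, &sgx, &partial)) {
 *              // sgx.dma_addr/sgx.dma_len describe the next piece;
 *              // partial == 0 marks the end of a full block.
 *      }
 */
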
int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg_prot;
        uint32_t *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;
        uint32_t        prot_int; /* protection interval */
        uint32_t        partial;
        struct qla2_sgx sgx;
        dma_addr_t      sle_dma;
        uint32_t        sle_dma_len, tot_prot_dma_len = 0;
        struct scsi_cmnd *cmd;

        memset(&sgx, 0, sizeof(struct qla2_sgx));
        if (sp) {
                cmd = GET_CMD_SP(sp);
                prot_int = cmd->device->sector_size;

                sgx.tot_bytes = scsi_bufflen(cmd);
                sgx.cur_sg = scsi_sglist(cmd);
                sgx.sp = sp;

                sg_prot = scsi_prot_sglist(cmd);
        } else if (tc) {
                prot_int      = tc->blk_sz;
                sgx.tot_bytes = tc->bufflen;
                sgx.cur_sg    = tc->sg;
                sg_prot       = tc->prot_sg;
        } else {
                BUG();
                return 1;
        }

        while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
                sle_dma = sgx.dma_addr;
                sle_dma_len = sgx.dma_len;
alloc_and_fill:
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sle_dma_len);
                avail_dsds--;

                if (partial == 0) {
                        /* Got a full protection interval */
                        sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
                        sle_dma_len = 8;

                        tot_prot_dma_len += sle_dma_len;
                        if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
                                tot_prot_dma_len = 0;
                                sg_prot = sg_next(sg_prot);
                        }

                        partial = 1; /* So as to not re-enter this block */
                        goto alloc_and_fill;
                }
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

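/*
 * Layout note (illustrative interpretation): each DSD list allocated
 * from dl_dma_pool holds avail_dsds 12-byte entries (32-bit address
 * low/high plus length) and one extra 12-byte slot, used either to
 * chain to the next list or, in the final list, for the terminating
 * null entry; hence dsd_list_len = (avail_dsds + 1) * 12 above, and
 * the walk ends with three zero dwords.
 */
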
int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
        uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg, *sgl;
        uint32_t *cur_dsd = dsd;
        int     i;
        uint16_t        used_dsds = tot_dsds;
        struct scsi_cmnd *cmd;

        if (sp) {
                cmd = GET_CMD_SP(sp);
                sgl = scsi_sglist(cmd);
        } else if (tc) {
                sgl = tc->sg;
        } else {
                BUG();
                return 1;
        }

        for_each_sg(sgl, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);

                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg, *sgl;
        int     i;
        struct scsi_cmnd *cmd;
        uint32_t *cur_dsd = dsd;
        uint16_t used_dsds = tot_dsds;
        struct scsi_qla_host *vha;

        if (sp) {
                cmd = GET_CMD_SP(sp);
                sgl = scsi_prot_sglist(cmd);
                vha = sp->vha;
        } else if (tc) {
                vha = tc->vha;
                sgl = tc->prot_sg;
        } else {
                BUG();
                return 1;
        }

        ql_dbg(ql_dbg_tgt, vha, 0xe021,
                "%s: enter\n", __func__);

        for_each_sg(sgl, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                                QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);

                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

                avail_dsds--;
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type CRC_2 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type CRC_2 IOCB
 * @tot_dsds: Total number of data segments to transfer
 * @tot_prot_dsds: Total number of protection segments to transfer
 * @fw_prot_opts: Protection options to be passed to the firmware
 */
inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
        uint32_t                *cur_dsd, *fcp_dl;
        scsi_qla_host_t         *vha;
        struct scsi_cmnd        *cmd;
        uint32_t                total_bytes = 0;
        uint32_t                data_bytes;
        uint32_t                dif_bytes;
        uint8_t                 bundling = 1;
        uint16_t                blk_size;
        uint8_t                 *clr_ptr;
        struct crc_context      *crc_ctx_pkt = NULL;
        struct qla_hw_data      *ha;
        uint8_t                 additional_fcpcdb_len;
        uint16_t                fcp_cmnd_len;
        struct fcp_cmnd         *fcp_cmnd;
        dma_addr_t              crc_ctx_dma;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type CRC_2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);

        vha = sp->vha;
        ha = vha->hw;

        /* No data transfer */
        data_bytes = scsi_bufflen(cmd);
        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return QLA_SUCCESS;
        }

        cmd_pkt->vp_index = sp->vha->vp_idx;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags =
                    cpu_to_le16(CF_WRITE_DATA);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags =
                    cpu_to_le16(CF_READ_DATA);
        }

        if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
                bundling = 0;

        /* Allocate CRC context from global pool */
        crc_ctx_pkt = sp->u.scmd.ctx =
            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

        if (!crc_ctx_pkt)
                goto crc_queuing_error;

        /* Zero out CTX area. */
        clr_ptr = (uint8_t *)crc_ctx_pkt;
        memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

        crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

        sp->flags |= SRB_CRC_CTX_DMA_VALID;

        /* Set handle */
        crc_ctx_pkt->handle = cmd_pkt->handle;

        INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

        qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
            &crc_ctx_pkt->ref_tag, tot_prot_dsds);

        cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
        cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
        cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

        /* Determine SCSI command length -- align to 4 byte boundary */
        if (cmd->cmd_len > 16) {
                additional_fcpcdb_len = cmd->cmd_len - 16;
                if ((cmd->cmd_len % 4) != 0) {
                        /* SCSI cmd > 16 bytes must be multiple of 4 */
                        goto crc_queuing_error;
                }
                fcp_cmnd_len = 12 + cmd->cmd_len + 4;
        } else {
                additional_fcpcdb_len = 0;
                fcp_cmnd_len = 12 + 16 + 4;
        }

        fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

        fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
        if (cmd->sc_data_direction == DMA_TO_DEVICE)
                fcp_cmnd->additional_cdb_len |= 1;
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                fcp_cmnd->additional_cdb_len |= 2;

        int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
        memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
        cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
            LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
            MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        fcp_cmnd->task_management = 0;
        fcp_cmnd->task_attribute = TSK_SIMPLE;

        cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

        /* Compute dif len and adjust data len to include protection */
        dif_bytes = 0;
        blk_size = cmd->device->sector_size;
        dif_bytes = (data_bytes / blk_size) * 8;

        switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
        case SCSI_PROT_READ_INSERT:
        case SCSI_PROT_WRITE_STRIP:
            total_bytes = data_bytes;
            data_bytes += dif_bytes;
            break;

        case SCSI_PROT_READ_STRIP:
        case SCSI_PROT_WRITE_INSERT:
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
            total_bytes = data_bytes + dif_bytes;
            break;
        default:
            BUG();
        }

        if (!qla2x00_hba_err_chk_enabled(sp))
                fw_prot_opts |= 0x10; /* Disable Guard tag checking */
        /* HBA error checking enabled */
        else if (IS_PI_UNINIT_CAPABLE(ha)) {
                if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
                    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
                        SCSI_PROT_DIF_TYPE2))
                        fw_prot_opts |= BIT_10;
                else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
                    SCSI_PROT_DIF_TYPE3)
                        fw_prot_opts |= BIT_11;
        }

        if (!bundling) {
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
        } else {
                /*
                 * Configure Bundling if we need to fetch interleaving
                 * protection PCI accesses
                 */
                fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
                crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
                crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
                                                        tot_prot_dsds);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
        }

        /* Finish the common fields of CRC pkt */
        crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
        crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
        crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
        crc_ctx_pkt->guard_seed = cpu_to_le16(0);
        /* Fibre channel byte count */
        cmd_pkt->byte_count = cpu_to_le32(total_bytes);
        fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
            additional_fcpcdb_len);
        *fcp_dl = htonl(total_bytes);

        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return QLA_SUCCESS;
        }
        /* Walks data segments */

        cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

        if (!bundling && tot_prot_dsds) {
                if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
                        cur_dsd, tot_dsds, NULL))
                        goto crc_queuing_error;
        } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
                        (tot_dsds - tot_prot_dsds), NULL))
                goto crc_queuing_error;

        if (bundling && tot_prot_dsds) {
                /* Walks dif segments */
                cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
                if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
                                tot_prot_dsds, NULL))
                        goto crc_queuing_error;
        }
        return QLA_SUCCESS;

crc_queuing_error:
        /* Cleanup will be performed by the caller */

        return QLA_FUNCTION_FAILED;
}

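/*
 * Worked example (illustrative): a 4 KB transfer on 512-byte sectors
 * carries 8 DIF bytes per block, so dif_bytes = (4096 / 512) * 8 = 64.
 * total_bytes above is the fibre channel transfer length (fcp_dl):
 *
 *      READ_INSERT / WRITE_STRIP: DIF exists only on the host side,
 *          so total_bytes = 4096 while the host DMA covers 4160;
 *      READ_STRIP / WRITE_INSERT / *_PASS: DIF travels on the wire,
 *          so total_bytes = 4096 + 64 = 4160.
 */
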
1398 /**
1399  * qla24xx_start_scsi() - Send a SCSI command to the ISP
1400  * @sp: command to send to the ISP
1401  *
1402  * Returns non-zero if a failure occurred, else zero.
1403  */
1404 int
1405 qla24xx_start_scsi(srb_t *sp)
1406 {
1407         int             nseg;
1408         unsigned long   flags;
1409         uint32_t        *clr_ptr;
1410         uint32_t        index;
1411         uint32_t        handle;
1412         struct cmd_type_7 *cmd_pkt;
1413         uint16_t        cnt;
1414         uint16_t        req_cnt;
1415         uint16_t        tot_dsds;
1416         struct req_que *req = NULL;
1417         struct rsp_que *rsp = NULL;
1418         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1419         struct scsi_qla_host *vha = sp->vha;
1420         struct qla_hw_data *ha = vha->hw;
1421
1422         /* Setup device pointers. */
1423         req = vha->req;
1424         rsp = req->rsp;
1425
1426         /* So we know we haven't pci_map'ed anything yet */
1427         tot_dsds = 0;
1428
1429         /* Send marker if required */
1430         if (vha->marker_needed != 0) {
1431                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1432                     QLA_SUCCESS)
1433                         return QLA_FUNCTION_FAILED;
1434                 vha->marker_needed = 0;
1435         }
1436
1437         /* Acquire ring specific lock */
1438         spin_lock_irqsave(&ha->hardware_lock, flags);
1439
1440         /* Check for room in outstanding command list. */
1441         handle = req->current_outstanding_cmd;
1442         for (index = 1; index < req->num_outstanding_cmds; index++) {
1443                 handle++;
1444                 if (handle == req->num_outstanding_cmds)
1445                         handle = 1;
1446                 if (!req->outstanding_cmds[handle])
1447                         break;
1448         }
1449         if (index == req->num_outstanding_cmds)
1450                 goto queuing_error;
1451
1452         /* Map the sg table so we have an accurate count of sg entries needed */
1453         if (scsi_sg_count(cmd)) {
1454                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1455                     scsi_sg_count(cmd), cmd->sc_data_direction);
1456                 if (unlikely(!nseg))
1457                         goto queuing_error;
1458         } else
1459                 nseg = 0;
1460
1461         tot_dsds = nseg;
1462         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1463         if (req->cnt < (req_cnt + 2)) {
1464                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1465                     RD_REG_DWORD_RELAXED(req->req_q_out);
1466                 if (req->ring_index < cnt)
1467                         req->cnt = cnt - req->ring_index;
1468                 else
1469                         req->cnt = req->length -
1470                                 (req->ring_index - cnt);
1471                 if (req->cnt < (req_cnt + 2))
1472                         goto queuing_error;
1473         }
1474
1475         /* Build command packet. */
1476         req->current_outstanding_cmd = handle;
1477         req->outstanding_cmds[handle] = sp;
1478         sp->handle = handle;
1479         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1480         req->cnt -= req_cnt;
1481
1482         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1483         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1484
1485         /* Zero out remaining portion of packet. */
1486         /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1487         clr_ptr = (uint32_t *)cmd_pkt + 2;
1488         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1489         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1490
1491         /* Set NPORT-ID and LUN number */
1492         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1493         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1494         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1495         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1496         cmd_pkt->vp_index = sp->vha->vp_idx;
1497
1498         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1499         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1500
1501         cmd_pkt->task = TSK_SIMPLE;
1502
1503         /* Load SCSI command packet. */
1504         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1505         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1506
1507         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1508
1509         /* Build IOCB segments */
1510         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1511
1512         /* Set total data segment count. */
1513         cmd_pkt->entry_count = (uint8_t)req_cnt;
1514         wmb();
1515         /* Adjust ring index. */
1516         req->ring_index++;
1517         if (req->ring_index == req->length) {
1518                 req->ring_index = 0;
1519                 req->ring_ptr = req->ring;
1520         } else
1521                 req->ring_ptr++;
1522
1523         sp->flags |= SRB_DMA_VALID;
1524
1525         /* Set chip new ring index. */
1526         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1527
1528         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1529         return QLA_SUCCESS;
1530
1531 queuing_error:
1532         if (tot_dsds)
1533                 scsi_dma_unmap(cmd);
1534
1535         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1536
1537         return QLA_FUNCTION_FAILED;
1538 }
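
/*
 * Illustrative sketch, not used by the driver: the free-space check in
 * qla24xx_start_scsi() treats the request ring as a circular buffer.
 * Given the driver's producer index, the consumer index read back from
 * the firmware (the out pointer), and the ring length, the number of
 * free entries follows as below.  The helper name is hypothetical.
 */
static inline uint16_t
example_req_ring_space(uint16_t ring_index, uint16_t out, uint16_t length)
{
        /* Producer behind consumer: the gap between them is free. */
        if (ring_index < out)
                return out - ring_index;
        /* Otherwise the free region wraps past the end of the ring. */
        return length - (ring_index - out);
}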
1539
1540 /**
1541  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1542  * @sp: command to send to the ISP
1543  *
1544  * Returns non-zero if a failure occurred, else zero.
1545  */
1546 int
1547 qla24xx_dif_start_scsi(srb_t *sp)
1548 {
1549         int                     nseg;
1550         unsigned long           flags;
1551         uint32_t                *clr_ptr;
1552         uint32_t                index;
1553         uint32_t                handle;
1554         uint16_t                cnt;
1555         uint16_t                req_cnt = 0;
1556         uint16_t                tot_dsds;
1557         uint16_t                tot_prot_dsds;
1558         uint16_t                fw_prot_opts = 0;
1559         struct req_que          *req = NULL;
1560         struct rsp_que          *rsp = NULL;
1561         struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1562         struct scsi_qla_host    *vha = sp->vha;
1563         struct qla_hw_data      *ha = vha->hw;
1564         struct cmd_type_crc_2   *cmd_pkt;
1565         uint32_t                status = 0;
1566
1567 #define QDSS_GOT_Q_SPACE        BIT_0
1568
1569         /* Only handle protection I/O or CDBs longer than 16 bytes here */
1570         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1571                 if (cmd->cmd_len <= 16)
1572                         return qla24xx_start_scsi(sp);
1573         }
1574
1575         /* Setup device pointers. */
1576         req = vha->req;
1577         rsp = req->rsp;
1578
1579         /* So we know we haven't pci_map'ed anything yet */
1580         tot_dsds = 0;
1581
1582         /* Send marker if required */
1583         if (vha->marker_needed != 0) {
1584                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1585                     QLA_SUCCESS)
1586                         return QLA_FUNCTION_FAILED;
1587                 vha->marker_needed = 0;
1588         }
1589
1590         /* Acquire ring specific lock */
1591         spin_lock_irqsave(&ha->hardware_lock, flags);
1592
1593         /* Check for room in outstanding command list. */
1594         handle = req->current_outstanding_cmd;
1595         for (index = 1; index < req->num_outstanding_cmds; index++) {
1596                 handle++;
1597                 if (handle == req->num_outstanding_cmds)
1598                         handle = 1;
1599                 if (!req->outstanding_cmds[handle])
1600                         break;
1601         }
1602
1603         if (index == req->num_outstanding_cmds)
1604                 goto queuing_error;
1605
1606         /* Compute number of required data segments */
1607         /* Map the sg table so we have an accurate count of sg entries needed */
1608         if (scsi_sg_count(cmd)) {
1609                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1610                     scsi_sg_count(cmd), cmd->sc_data_direction);
1611                 if (unlikely(!nseg))
1612                         goto queuing_error;
1613                 else
1614                         sp->flags |= SRB_DMA_VALID;
1615
1616                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1617                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1618                         struct qla2_sgx sgx;
1619                         uint32_t        partial;
1620
1621                         memset(&sgx, 0, sizeof(struct qla2_sgx));
1622                         sgx.tot_bytes = scsi_bufflen(cmd);
1623                         sgx.cur_sg = scsi_sglist(cmd);
1624                         sgx.sp = sp;
1625
1626                         nseg = 0;
1627                         while (qla24xx_get_one_block_sg(
1628                             cmd->device->sector_size, &sgx, &partial))
1629                                 nseg++;
1630                 }
1631         } else
1632                 nseg = 0;
1633
1634         /* number of required data segments */
1635         tot_dsds = nseg;
1636
1637         /* Compute number of required protection segments */
1638         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1639                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1640                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1641                 if (unlikely(!nseg))
1642                         goto queuing_error;
1643                 else
1644                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
1645
1646                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1647                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1648                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1649                 }
1650         } else {
1651                 nseg = 0;
1652         }
1653
1654         req_cnt = 1;
1655         /* Total Data and protection sg segment(s) */
1656         tot_prot_dsds = nseg;
1657         tot_dsds += nseg;
1658         if (req->cnt < (req_cnt + 2)) {
1659                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1660                     RD_REG_DWORD_RELAXED(req->req_q_out);
1661                 if (req->ring_index < cnt)
1662                         req->cnt = cnt - req->ring_index;
1663                 else
1664                         req->cnt = req->length -
1665                                 (req->ring_index - cnt);
1666                 if (req->cnt < (req_cnt + 2))
1667                         goto queuing_error;
1668         }
1669
1670         status |= QDSS_GOT_Q_SPACE;
1671
1672         /* Build header part of command packet (excluding the OPCODE). */
1673         req->current_outstanding_cmd = handle;
1674         req->outstanding_cmds[handle] = sp;
1675         sp->handle = handle;
1676         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1677         req->cnt -= req_cnt;
1678
1679         /* Fill-in common area */
1680         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1681         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1682
1683         clr_ptr = (uint32_t *)cmd_pkt + 2;
1684         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1685
1686         /* Set NPORT-ID and LUN number */
1687         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1688         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1689         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1690         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1691
1692         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1693         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1694
1695         /* Total Data and protection segment(s) */
1696         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1697
1698         /* Build IOCB segments and adjust for data protection segments */
1699         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1700             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1701                 QLA_SUCCESS)
1702                 goto queuing_error;
1703
1704         cmd_pkt->entry_count = (uint8_t)req_cnt;
1705         /* Specify response queue number where completion should happen */
1706         cmd_pkt->entry_status = (uint8_t) rsp->id;
1707         cmd_pkt->timeout = cpu_to_le16(0);
1708         wmb();
1709
1710         /* Adjust ring index. */
1711         req->ring_index++;
1712         if (req->ring_index == req->length) {
1713                 req->ring_index = 0;
1714                 req->ring_ptr = req->ring;
1715         } else
1716                 req->ring_ptr++;
1717
1718         /* Set chip new ring index. */
1719         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1720
1721         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1722
1723         return QLA_SUCCESS;
1724
1725 queuing_error:
1726         if (status & QDSS_GOT_Q_SPACE) {
1727                 req->outstanding_cmds[handle] = NULL;
1728                 req->cnt += req_cnt;
1729         }
1730         /* Cleanup will be performed by the caller (queuecommand) */
1731
1732         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1733         return QLA_FUNCTION_FAILED;
1734 }
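
/*
 * Illustrative sketch, not used by the driver: when the HBA inserts or
 * strips the 8-byte DIF tuple (SCSI_PROT_READ_INSERT /
 * SCSI_PROT_WRITE_STRIP), qla24xx_dif_start_scsi() ends up with one
 * protection segment per logical block, which is where the
 * scsi_bufflen() / sector_size computation above comes from.  The
 * helper name is hypothetical.
 */
static inline uint32_t
example_dif_prot_segs(uint32_t bufflen, uint32_t sector_size)
{
        /* One DIF tuple, hence one segment, per logical block. */
        return bufflen / sector_size;
}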
1735
1736 /**
1737  * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1738  * @sp: command to send to the ISP
1739  *
1740  * Returns non-zero if a failure occurred, else zero.
1741  */
1742 static int
1743 qla2xxx_start_scsi_mq(srb_t *sp)
1744 {
1745         int             nseg;
1746         unsigned long   flags;
1747         uint32_t        *clr_ptr;
1748         uint32_t        index;
1749         uint32_t        handle;
1750         struct cmd_type_7 *cmd_pkt;
1751         uint16_t        cnt;
1752         uint16_t        req_cnt;
1753         uint16_t        tot_dsds;
1754         struct req_que *req = NULL;
1755         struct rsp_que *rsp = NULL;
1756         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1757         struct scsi_qla_host *vha = sp->fcport->vha;
1758         struct qla_hw_data *ha = vha->hw;
1759         struct qla_qpair *qpair = sp->qpair;
1760
1761         /* Acquire qpair specific lock */
1762         spin_lock_irqsave(&qpair->qp_lock, flags);
1763
1764         /* Setup qpair pointers */
1765         rsp = qpair->rsp;
1766         req = qpair->req;
1767
1768         /* So we know we haven't pci_map'ed anything yet */
1769         tot_dsds = 0;
1770
1771         /* Send marker if required */
1772         if (vha->marker_needed != 0) {
1773                 if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1774                     QLA_SUCCESS) {
1775                         spin_unlock_irqrestore(&qpair->qp_lock, flags);
1776                         return QLA_FUNCTION_FAILED;
1777                 }
1778                 vha->marker_needed = 0;
1779         }
1780
1781         /* Check for room in outstanding command list. */
1782         handle = req->current_outstanding_cmd;
1783         for (index = 1; index < req->num_outstanding_cmds; index++) {
1784                 handle++;
1785                 if (handle == req->num_outstanding_cmds)
1786                         handle = 1;
1787                 if (!req->outstanding_cmds[handle])
1788                         break;
1789         }
1790         if (index == req->num_outstanding_cmds)
1791                 goto queuing_error;
1792
1793         /* Map the sg table so we have an accurate count of sg entries needed */
1794         if (scsi_sg_count(cmd)) {
1795                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1796                     scsi_sg_count(cmd), cmd->sc_data_direction);
1797                 if (unlikely(!nseg))
1798                         goto queuing_error;
1799         } else
1800                 nseg = 0;
1801
1802         tot_dsds = nseg;
1803         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1804         if (req->cnt < (req_cnt + 2)) {
1805                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1806                     RD_REG_DWORD_RELAXED(req->req_q_out);
1807                 if (req->ring_index < cnt)
1808                         req->cnt = cnt - req->ring_index;
1809                 else
1810                         req->cnt = req->length -
1811                                 (req->ring_index - cnt);
1812                 if (req->cnt < (req_cnt + 2))
1813                         goto queuing_error;
1814         }
1815
1816         /* Build command packet. */
1817         req->current_outstanding_cmd = handle;
1818         req->outstanding_cmds[handle] = sp;
1819         sp->handle = handle;
1820         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1821         req->cnt -= req_cnt;
1822
1823         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1824         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1825
1826         /* Zero out remaining portion of packet. */
1827         /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1828         clr_ptr = (uint32_t *)cmd_pkt + 2;
1829         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1830         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1831
1832         /* Set NPORT-ID and LUN number */
1833         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1834         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1835         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1836         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1837         cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1838
1839         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1840         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1841
1842         cmd_pkt->task = TSK_SIMPLE;
1843
1844         /* Load SCSI command packet. */
1845         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1846         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1847
1848         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1849
1850         /* Build IOCB segments */
1851         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1852
1853         /* Set total data segment count. */
1854         cmd_pkt->entry_count = (uint8_t)req_cnt;
1855         wmb();
1856         /* Adjust ring index. */
1857         req->ring_index++;
1858         if (req->ring_index == req->length) {
1859                 req->ring_index = 0;
1860                 req->ring_ptr = req->ring;
1861         } else
1862                 req->ring_ptr++;
1863
1864         sp->flags |= SRB_DMA_VALID;
1865
1866         /* Set chip new ring index. */
1867         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1868
1869         spin_unlock_irqrestore(&qpair->qp_lock, flags);
1870         return QLA_SUCCESS;
1871
1872 queuing_error:
1873         if (tot_dsds)
1874                 scsi_dma_unmap(cmd);
1875
1876         spin_unlock_irqrestore(&qpair->qp_lock, flags);
1877
1878         return QLA_FUNCTION_FAILED;
1879 }
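
/*
 * Illustrative sketch, not used by the driver: the outstanding-command
 * search used by the start_scsi variants above scans the handle array
 * circularly, starting just past the last handle issued, and never
 * returns 0 because that slot is reserved to mean "no command".  The
 * helper name is hypothetical.
 */
static inline uint32_t
example_find_free_handle(srb_t **cmds, uint32_t num_cmds, uint32_t last)
{
        uint32_t index, handle = last;

        for (index = 1; index < num_cmds; index++) {
                handle++;
                if (handle == num_cmds)
                        handle = 1;     /* wrap around, skipping slot 0 */
                if (!cmds[handle])
                        return handle;
        }
        return 0;                       /* array full */
}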
1880
1881
1882 /**
1883  * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
1884  * @sp: command to send to the ISP
1885  *
1886  * Returns non-zero if a failure occurred, else zero.
1887  */
1888 int
1889 qla2xxx_dif_start_scsi_mq(srb_t *sp)
1890 {
1891         int                     nseg;
1892         unsigned long           flags;
1893         uint32_t                *clr_ptr;
1894         uint32_t                index;
1895         uint32_t                handle;
1896         uint16_t                cnt;
1897         uint16_t                req_cnt = 0;
1898         uint16_t                tot_dsds;
1899         uint16_t                tot_prot_dsds;
1900         uint16_t                fw_prot_opts = 0;
1901         struct req_que          *req = NULL;
1902         struct rsp_que          *rsp = NULL;
1903         struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1904         struct scsi_qla_host    *vha = sp->fcport->vha;
1905         struct qla_hw_data      *ha = vha->hw;
1906         struct cmd_type_crc_2   *cmd_pkt;
1907         uint32_t                status = 0;
1908         struct qla_qpair        *qpair = sp->qpair;
1909
1910 #define QDSS_GOT_Q_SPACE        BIT_0
1911
1912         /* Check for host side state */
1913         if (!qpair->online) {
1914                 cmd->result = DID_NO_CONNECT << 16;
1915                 return QLA_INTERFACE_ERROR;
1916         }
1917
1918         if (!qpair->difdix_supported &&
1919                 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
1920                 cmd->result = DID_NO_CONNECT << 16;
1921                 return QLA_INTERFACE_ERROR;
1922         }
1923
1924         /* Only handle protection I/O or CDBs longer than 16 bytes here */
1925         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1926                 if (cmd->cmd_len <= 16)
1927                         return qla2xxx_start_scsi_mq(sp);
1928         }
1929
1930         spin_lock_irqsave(&qpair->qp_lock, flags);
1931
1932         /* Setup qpair pointers */
1933         rsp = qpair->rsp;
1934         req = qpair->req;
1935
1936         /* So we know we haven't pci_map'ed anything yet */
1937         tot_dsds = 0;
1938
1939         /* Send marker if required */
1940         if (vha->marker_needed != 0) {
1941                 if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1942                     QLA_SUCCESS) {
1943                         spin_unlock_irqrestore(&qpair->qp_lock, flags);
1944                         return QLA_FUNCTION_FAILED;
1945                 }
1946                 vha->marker_needed = 0;
1947         }
1948
1949         /* Check for room in outstanding command list. */
1950         handle = req->current_outstanding_cmd;
1951         for (index = 1; index < req->num_outstanding_cmds; index++) {
1952                 handle++;
1953                 if (handle == req->num_outstanding_cmds)
1954                         handle = 1;
1955                 if (!req->outstanding_cmds[handle])
1956                         break;
1957         }
1958
1959         if (index == req->num_outstanding_cmds)
1960                 goto queuing_error;
1961
1962         /* Compute number of required data segments */
1963         /* Map the sg table so we have an accurate count of sg entries needed */
1964         if (scsi_sg_count(cmd)) {
1965                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1966                     scsi_sg_count(cmd), cmd->sc_data_direction);
1967                 if (unlikely(!nseg))
1968                         goto queuing_error;
1969                 else
1970                         sp->flags |= SRB_DMA_VALID;
1971
1972                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1973                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1974                         struct qla2_sgx sgx;
1975                         uint32_t        partial;
1976
1977                         memset(&sgx, 0, sizeof(struct qla2_sgx));
1978                         sgx.tot_bytes = scsi_bufflen(cmd);
1979                         sgx.cur_sg = scsi_sglist(cmd);
1980                         sgx.sp = sp;
1981
1982                         nseg = 0;
1983                         while (qla24xx_get_one_block_sg(
1984                             cmd->device->sector_size, &sgx, &partial))
1985                                 nseg++;
1986                 }
1987         } else
1988                 nseg = 0;
1989
1990         /* number of required data segments */
1991         tot_dsds = nseg;
1992
1993         /* Compute number of required protection segments */
1994         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1995                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1996                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1997                 if (unlikely(!nseg))
1998                         goto queuing_error;
1999                 else
2000                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
2001
2002                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2003                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2004                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2005                 }
2006         } else {
2007                 nseg = 0;
2008         }
2009
2010         req_cnt = 1;
2011         /* Total Data and protection sg segment(s) */
2012         tot_prot_dsds = nseg;
2013         tot_dsds += nseg;
2014         if (req->cnt < (req_cnt + 2)) {
2015                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2016                     RD_REG_DWORD_RELAXED(req->req_q_out);
2017                 if (req->ring_index < cnt)
2018                         req->cnt = cnt - req->ring_index;
2019                 else
2020                         req->cnt = req->length -
2021                                 (req->ring_index - cnt);
2022                 if (req->cnt < (req_cnt + 2))
2023                         goto queuing_error;
2024         }
2025
2026         status |= QDSS_GOT_Q_SPACE;
2027
2028         /* Build header part of command packet (excluding the OPCODE). */
2029         req->current_outstanding_cmd = handle;
2030         req->outstanding_cmds[handle] = sp;
2031         sp->handle = handle;
2032         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2033         req->cnt -= req_cnt;
2034
2035         /* Fill-in common area */
2036         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2037         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2038
2039         clr_ptr = (uint32_t *)cmd_pkt + 2;
2040         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2041
2042         /* Set NPORT-ID and LUN number */
2043         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2044         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2045         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2046         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2047
2048         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2049         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2050
2051         /* Total Data and protection segment(s) */
2052         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2053
2054         /* Build IOCB segments and adjust for data protection segments */
2055         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2056             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2057                 QLA_SUCCESS)
2058                 goto queuing_error;
2059
2060         cmd_pkt->entry_count = (uint8_t)req_cnt;
2061         cmd_pkt->timeout = cpu_to_le16(0);
2062         wmb();
2063
2064         /* Adjust ring index. */
2065         req->ring_index++;
2066         if (req->ring_index == req->length) {
2067                 req->ring_index = 0;
2068                 req->ring_ptr = req->ring;
2069         } else
2070                 req->ring_ptr++;
2071
2072         /* Set chip new ring index. */
2073         WRT_REG_DWORD(req->req_q_in, req->ring_index);
2074
2075         /* Manage unprocessed RIO/ZIO commands in response queue. */
2076         if (vha->flags.process_response_queue &&
2077             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2078                 qla24xx_process_response_queue(vha, rsp);
2079
2080         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2081
2082         return QLA_SUCCESS;
2083
2084 queuing_error:
2085         if (status & QDSS_GOT_Q_SPACE) {
2086                 req->outstanding_cmds[handle] = NULL;
2087                 req->cnt += req_cnt;
2088         }
2089         /* Cleanup will be performed by the caller (queuecommand) */
2090
2091         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2092         return QLA_FUNCTION_FAILED;
2093 }
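
/*
 * Illustrative sketch, not used by the driver: the DID_NO_CONNECT early
 * exits above encode the host byte of a SCSI result, which the midlayer
 * keeps in bits 16-23 of cmd->result.  The helper name is hypothetical.
 */
static inline int
example_host_byte_result(int host_status)
{
        /* e.g. example_host_byte_result(DID_NO_CONNECT) */
        return host_status << 16;
}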
2094
2095 /* Generic Control-SRB manipulation functions. */
2096
2097 /* hardware_lock assumed to be held. */
2098
2099 void *
2100 __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
2101 {
2102         scsi_qla_host_t *vha = qpair->vha;
2103         struct qla_hw_data *ha = vha->hw;
2104         struct req_que *req = qpair->req;
2105         device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2106         uint32_t index, handle;
2107         request_t *pkt;
2108         uint16_t cnt, req_cnt;
2109
2110         pkt = NULL;
2111         req_cnt = 1;
2112         handle = 0;
2113
2114         if (sp && (sp->type != SRB_SCSI_CMD)) {
2115                 /* Adjust entry-counts as needed. */
2116                 req_cnt = sp->iocbs;
2117         }
2118
2119         /* Check for room on request queue. */
2120         if (req->cnt < req_cnt + 2) {
2121                 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
2122                         cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
2123                 else if (IS_P3P_TYPE(ha))
2124                         cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
2125                 else if (IS_FWI2_CAPABLE(ha))
2126                         cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
2127                 else if (IS_QLAFX00(ha))
2128                         cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
2129                 else
2130                         cnt = qla2x00_debounce_register(
2131                             ISP_REQ_Q_OUT(ha, &reg->isp));
2132
2133                 if (req->ring_index < cnt)
2134                         req->cnt = cnt - req->ring_index;
2135                 else
2136                         req->cnt = req->length -
2137                             (req->ring_index - cnt);
2138         }
2139         if (req->cnt < req_cnt + 2)
2140                 goto queuing_error;
2141
2142         if (sp) {
2143                 /* Check for room in outstanding command list. */
2144                 handle = req->current_outstanding_cmd;
2145                 for (index = 1; index < req->num_outstanding_cmds; index++) {
2146                         handle++;
2147                         if (handle == req->num_outstanding_cmds)
2148                                 handle = 1;
2149                         if (!req->outstanding_cmds[handle])
2150                                 break;
2151                 }
2152                 if (index == req->num_outstanding_cmds) {
2153                         ql_log(ql_log_warn, vha, 0x700b,
2154                             "No room on outstanding cmd array.\n");
2155                         goto queuing_error;
2156                 }
2157
2158                 /* Prep command array. */
2159                 req->current_outstanding_cmd = handle;
2160                 req->outstanding_cmds[handle] = sp;
2161                 sp->handle = handle;
2162         }
2163
2164         /* Prep packet */
2165         req->cnt -= req_cnt;
2166         pkt = req->ring_ptr;
2167         memset(pkt, 0, REQUEST_ENTRY_SIZE);
2168         if (IS_QLAFX00(ha)) {
2169                 WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
2170                 WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
2171         } else {
2172                 pkt->entry_count = req_cnt;
2173                 pkt->handle = handle;
2174         }
2175
2176         return pkt;
2177
2178 queuing_error:
2179         qpair->tgt_counters.num_alloc_iocb_failed++;
2180         return pkt;
2181 }
2182
2183 void *
2184 qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2185 {
2186         scsi_qla_host_t *vha = qpair->vha;
2187
2188         if (qla2x00_reset_active(vha))
2189                 return NULL;
2190
2191         return __qla2x00_alloc_iocbs(qpair, sp);
2192 }
2193
2194 void *
2195 qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2196 {
2197         return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
2198 }
2199
2200 static void
2201 qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2202 {
2203         struct srb_iocb *lio = &sp->u.iocb_cmd;
2204
2205         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2206         logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2207         if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI)
2208                 logio->control_flags |= LCF_NVME_PRLI;
2209
2210         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2211         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2212         logio->port_id[1] = sp->fcport->d_id.b.area;
2213         logio->port_id[2] = sp->fcport->d_id.b.domain;
2214         logio->vp_index = sp->vha->vp_idx;
2215 }
2216
2217 static void
2218 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2219 {
2220         struct srb_iocb *lio = &sp->u.iocb_cmd;
2221
2222         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2223         logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2224
2225         if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2226                 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2227         if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2228                 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2229         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2230         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2231         logio->port_id[1] = sp->fcport->d_id.b.area;
2232         logio->port_id[2] = sp->fcport->d_id.b.domain;
2233         logio->vp_index = sp->vha->vp_idx;
2234 }
2235
2236 static void
2237 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2238 {
2239         struct qla_hw_data *ha = sp->vha->hw;
2240         struct srb_iocb *lio = &sp->u.iocb_cmd;
2241         uint16_t opts;
2242
2243         mbx->entry_type = MBX_IOCB_TYPE;
2244         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2245         mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2246         opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2247         opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
2248         if (HAS_EXTENDED_IDS(ha)) {
2249                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2250                 mbx->mb10 = cpu_to_le16(opts);
2251         } else {
2252                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2253         }
2254         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2255         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2256             sp->fcport->d_id.b.al_pa);
2257         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2258 }
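
/*
 * Illustrative sketch, not used by the driver: on adapters without
 * extended loop IDs, qla2x00_login_iocb() packs the loop ID and the
 * PLOGI option bits into the single mailbox word mb1, as shown below.
 * The helper name is hypothetical.
 */
static inline uint16_t
example_pack_mb1(uint16_t loop_id, uint16_t opts)
{
        /* loop ID in the high byte, option flags in the low byte */
        return (loop_id << 8) | opts;
}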
2259
2260 static void
2261 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2262 {
2263         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2264         logio->control_flags =
2265             cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
2266         if (!sp->fcport->se_sess ||
2267             !sp->fcport->keep_nport_handle)
2268                 logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
2269         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2270         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2271         logio->port_id[1] = sp->fcport->d_id.b.area;
2272         logio->port_id[2] = sp->fcport->d_id.b.domain;
2273         logio->vp_index = sp->vha->vp_idx;
2274 }
2275
2276 static void
2277 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2278 {
2279         struct qla_hw_data *ha = sp->vha->hw;
2280
2281         mbx->entry_type = MBX_IOCB_TYPE;
2282         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2283         mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2284         mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2285             cpu_to_le16(sp->fcport->loop_id):
2286             cpu_to_le16(sp->fcport->loop_id << 8);
2287         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2288         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2289             sp->fcport->d_id.b.al_pa);
2290         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2291         /* Implicit: mbx->mb10 = 0. */
2292 }
2293
2294 static void
2295 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2296 {
2297         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2298         logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2299         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2300         logio->vp_index = sp->vha->vp_idx;
2301 }
2302
2303 static void
2304 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2305 {
2306         struct qla_hw_data *ha = sp->vha->hw;
2307
2308         mbx->entry_type = MBX_IOCB_TYPE;
2309         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2310         mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2311         if (HAS_EXTENDED_IDS(ha)) {
2312                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2313                 mbx->mb10 = cpu_to_le16(BIT_0);
2314         } else {
2315                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2316         }
2317         mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2318         mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2319         mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2320         mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2321         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2322 }
2323
2324 static void
2325 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2326 {
2327         uint32_t flags;
2328         uint64_t lun;
2329         struct fc_port *fcport = sp->fcport;
2330         scsi_qla_host_t *vha = fcport->vha;
2331         struct qla_hw_data *ha = vha->hw;
2332         struct srb_iocb *iocb = &sp->u.iocb_cmd;
2333         struct req_que *req = vha->req;
2334
2335         flags = iocb->u.tmf.flags;
2336         lun = iocb->u.tmf.lun;
2337
2338         tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2339         tsk->entry_count = 1;
2340         tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2341         tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2342         tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2343         tsk->control_flags = cpu_to_le32(flags);
2344         tsk->port_id[0] = fcport->d_id.b.al_pa;
2345         tsk->port_id[1] = fcport->d_id.b.area;
2346         tsk->port_id[2] = fcport->d_id.b.domain;
2347         tsk->vp_index = fcport->vha->vp_idx;
2348
2349         if (flags == TCF_LUN_RESET) {
2350                 int_to_scsilun(lun, &tsk->lun);
2351                 host_to_fcp_swap((uint8_t *)&tsk->lun,
2352                         sizeof(tsk->lun));
2353         }
2354 }
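
/*
 * Illustrative sketch, not used by the driver: every LUN field built in
 * this file is produced the same way -- int_to_scsilun() encodes the
 * 64-bit LUN into the 8-byte SAM format, then host_to_fcp_swap()
 * converts it to the byte order the FCP payload expects.  The helper
 * name is hypothetical.
 */
static inline void
example_fcp_lun(uint64_t lun, struct scsi_lun *fcp_lun)
{
        int_to_scsilun(lun, fcp_lun);
        host_to_fcp_swap((uint8_t *)fcp_lun, sizeof(*fcp_lun));
}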
2355
2356 static void
2357 qla2x00_els_dcmd_sp_free(void *data)
2358 {
2359         srb_t *sp = data;
2360         struct srb_iocb *elsio = &sp->u.iocb_cmd;
2361
2362         kfree(sp->fcport);
2363
2364         if (elsio->u.els_logo.els_logo_pyld)
2365                 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2366                     elsio->u.els_logo.els_logo_pyld,
2367                     elsio->u.els_logo.els_logo_pyld_dma);
2368
2369         del_timer(&elsio->timer);
2370         qla2x00_rel_sp(sp);
2371 }
2372
2373 static void
2374 qla2x00_els_dcmd_iocb_timeout(void *data)
2375 {
2376         srb_t *sp = data;
2377         fc_port_t *fcport = sp->fcport;
2378         struct scsi_qla_host *vha = sp->vha;
2379         struct srb_iocb *lio = &sp->u.iocb_cmd;
2380
2381         ql_dbg(ql_dbg_io, vha, 0x3069,
2382             "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2383             sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2384             fcport->d_id.b.al_pa);
2385
2386         complete(&lio->u.els_logo.comp);
2387 }
2388
2389 static void
2390 qla2x00_els_dcmd_sp_done(void *ptr, int res)
2391 {
2392         srb_t *sp = ptr;
2393         fc_port_t *fcport = sp->fcport;
2394         struct srb_iocb *lio = &sp->u.iocb_cmd;
2395         struct scsi_qla_host *vha = sp->vha;
2396
2397         ql_dbg(ql_dbg_io, vha, 0x3072,
2398             "%s hdl=%x, portid=%02x%02x%02x done\n",
2399             sp->name, sp->handle, fcport->d_id.b.domain,
2400             fcport->d_id.b.area, fcport->d_id.b.al_pa);
2401
2402         complete(&lio->u.els_logo.comp);
2403 }
2404
2405 int
2406 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2407     port_id_t remote_did)
2408 {
2409         srb_t *sp;
2410         fc_port_t *fcport = NULL;
2411         struct srb_iocb *elsio = NULL;
2412         struct qla_hw_data *ha = vha->hw;
2413         struct els_logo_payload logo_pyld;
2414         int rval = QLA_SUCCESS;
2415
2416         fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2417         if (!fcport) {
2418                 ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2419                 return -ENOMEM;
2420         }
2421
2422         /* Alloc SRB structure */
2423         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2424         if (!sp) {
2425                 kfree(fcport);
2426                 ql_log(ql_log_info, vha, 0x70e6,
2427                     "SRB allocation failed\n");
2428                 return -ENOMEM;
2429         }
2430
2431         elsio = &sp->u.iocb_cmd;
2432         fcport->loop_id = 0xFFFF;
2433         fcport->d_id.b.domain = remote_did.b.domain;
2434         fcport->d_id.b.area = remote_did.b.area;
2435         fcport->d_id.b.al_pa = remote_did.b.al_pa;
2436
2437         ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2438             fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2439
2440         sp->type = SRB_ELS_DCMD;
2441         sp->name = "ELS_DCMD";
2442         sp->fcport = fcport;
2443         qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2444         elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2445         sp->done = qla2x00_els_dcmd_sp_done;
2446         sp->free = qla2x00_els_dcmd_sp_free;
2447
2448         elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2449                             DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2450                             GFP_KERNEL);
2451
2452         if (!elsio->u.els_logo.els_logo_pyld) {
2453                 sp->free(sp);
2454                 return QLA_FUNCTION_FAILED;
2455         }
2456
2457         memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2458
2459         elsio->u.els_logo.els_cmd = els_opcode;
2460         logo_pyld.opcode = els_opcode;
2461         logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2462         logo_pyld.s_id[1] = vha->d_id.b.area;
2463         logo_pyld.s_id[2] = vha->d_id.b.domain;
2464         host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2465         memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2466
2467         memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2468             sizeof(struct els_logo_payload));
2469
2470         rval = qla2x00_start_sp(sp);
2471         if (rval != QLA_SUCCESS) {
2472                 sp->free(sp);
2473                 return QLA_FUNCTION_FAILED;
2474         }
2475
2476         ql_dbg(ql_dbg_io, vha, 0x3074,
2477             "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2478             sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2479             fcport->d_id.b.area, fcport->d_id.b.al_pa);
2480
2481         wait_for_completion(&elsio->u.els_logo.comp);
2482
2483         sp->free(sp);
2484         return rval;
2485 }
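
/*
 * Illustrative usage sketch, not part of the driver: the ELS direct
 * command path above is synchronous -- both the done and timeout
 * callbacks complete the same completion, and the caller sleeps on it
 * before freeing the SRB.  Assuming the ELS_DCMD_LOGO opcode defined
 * elsewhere in the driver, an explicit LOGO to a remote port ID would
 * look like this hypothetical wrapper:
 */
static inline int
example_send_explicit_logo(scsi_qla_host_t *vha, port_id_t remote_did)
{
        return qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, remote_did);
}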
2486
2487 static void
2488 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2489 {
2490         scsi_qla_host_t *vha = sp->vha;
2491         struct srb_iocb *elsio = &sp->u.iocb_cmd;
2492
2493         els_iocb->entry_type = ELS_IOCB_TYPE;
2494         els_iocb->entry_count = 1;
2495         els_iocb->sys_define = 0;
2496         els_iocb->entry_status = 0;
2497         els_iocb->handle = sp->handle;
2498         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2499         els_iocb->tx_dsd_count = 1;
2500         els_iocb->vp_index = vha->vp_idx;
2501         els_iocb->sof_type = EST_SOFI3;
2502         els_iocb->rx_dsd_count = 0;
2503         els_iocb->opcode = elsio->u.els_logo.els_cmd;
2504
2505         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2506         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2507         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2508         els_iocb->control_flags = 0;
2509
2510         els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
2511         els_iocb->tx_address[0] =
2512             cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
2513         els_iocb->tx_address[1] =
2514             cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
2515         els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2516
2517         els_iocb->rx_byte_count = 0;
2518         els_iocb->rx_address[0] = 0;
2519         els_iocb->rx_address[1] = 0;
2520         els_iocb->rx_len = 0;
2521
2522         sp->vha->qla_stats.control_requests++;
2523 }
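
/*
 * Illustrative sketch, not used by the driver: IOCB address fields are
 * pairs of little-endian 32-bit words, so every 64-bit DMA address in
 * this file is split with the LSD()/MSD() macros, low word first.  The
 * helper name is hypothetical.
 */
static inline void
example_store_dma_addr(uint32_t *dsd, dma_addr_t addr)
{
        *dsd++ = cpu_to_le32(LSD(addr));        /* low 32 bits */
        *dsd = cpu_to_le32(MSD(addr));          /* high 32 bits */
}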
2524
2525 static void
2526 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2527 {
2528         struct bsg_job *bsg_job = sp->u.bsg_job;
2529         struct fc_bsg_request *bsg_request = bsg_job->request;
2530
2531         els_iocb->entry_type = ELS_IOCB_TYPE;
2532         els_iocb->entry_count = 1;
2533         els_iocb->sys_define = 0;
2534         els_iocb->entry_status = 0;
2535         els_iocb->handle = sp->handle;
2536         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2537         els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2538         els_iocb->vp_index = sp->vha->vp_idx;
2539         els_iocb->sof_type = EST_SOFI3;
2540         els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2541
2542         els_iocb->opcode =
2543             sp->type == SRB_ELS_CMD_RPT ?
2544             bsg_request->rqst_data.r_els.els_code :
2545             bsg_request->rqst_data.h_els.command_code;
2546         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2547         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2548         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2549         els_iocb->control_flags = 0;
2550         els_iocb->rx_byte_count =
2551             cpu_to_le32(bsg_job->reply_payload.payload_len);
2552         els_iocb->tx_byte_count =
2553             cpu_to_le32(bsg_job->request_payload.payload_len);
2554
2555         els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2556             (bsg_job->request_payload.sg_list)));
2557         els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2558             (bsg_job->request_payload.sg_list)));
2559         els_iocb->tx_len = cpu_to_le32(sg_dma_len
2560             (bsg_job->request_payload.sg_list));
2561
2562         els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2563             (bsg_job->reply_payload.sg_list)));
2564         els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2565             (bsg_job->reply_payload.sg_list)));
2566         els_iocb->rx_len = cpu_to_le32(sg_dma_len
2567             (bsg_job->reply_payload.sg_list));
2568
2569         sp->vha->qla_stats.control_requests++;
2570 }
2571
2572 static void
2573 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2574 {
2575         uint16_t        avail_dsds;
2576         uint32_t        *cur_dsd;
2577         struct scatterlist *sg;
2578         int index;
2579         uint16_t tot_dsds;
2580         scsi_qla_host_t *vha = sp->vha;
2581         struct qla_hw_data *ha = vha->hw;
2582         struct bsg_job *bsg_job = sp->u.bsg_job;
2583         int loop_iteration = 0;
2584         int entry_count = 1;
2585
2586         memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2587         ct_iocb->entry_type = CT_IOCB_TYPE;
2588         ct_iocb->entry_status = 0;
2589         ct_iocb->handle1 = sp->handle;
2590         SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2591         ct_iocb->status = cpu_to_le16(0);
2592         ct_iocb->control_flags = cpu_to_le16(0);
2593         ct_iocb->timeout = 0;
2594         ct_iocb->cmd_dsd_count =
2595             cpu_to_le16(bsg_job->request_payload.sg_cnt);
2596         ct_iocb->total_dsd_count =
2597             cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2598         ct_iocb->req_bytecount =
2599             cpu_to_le32(bsg_job->request_payload.payload_len);
2600         ct_iocb->rsp_bytecount =
2601             cpu_to_le32(bsg_job->reply_payload.payload_len);
2602
2603         ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2604             (bsg_job->request_payload.sg_list)));
2605         ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2606             (bsg_job->request_payload.sg_list)));
2607         ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2608
2609         ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2610             (bsg_job->reply_payload.sg_list)));
2611         ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2612             (bsg_job->reply_payload.sg_list)));
2613         ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2614
2615         avail_dsds = 1;
2616         cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2617         index = 0;
2618         tot_dsds = bsg_job->reply_payload.sg_cnt;
2619
2620         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2621                 dma_addr_t       sle_dma;
2622                 cont_a64_entry_t *cont_pkt;
2623
2624                 /* Allocate additional continuation packets? */
2625                 if (avail_dsds == 0) {
2626                         /*
2627                          * Five DSDs are available in the Cont.
2628                          * Type 1 IOCB.
2629                          */
2630                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2631                             vha->hw->req_q_map[0]);
2632                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2633                         avail_dsds = 5;
2634                         entry_count++;
2635                 }
2636
2637                 sle_dma = sg_dma_address(sg);
2638                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2639                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2640                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2641                 loop_iteration++;
2642                 avail_dsds--;
2643         }
2644         ct_iocb->entry_count = entry_count;
2645
2646         sp->vha->qla_stats.control_requests++;
2647 }
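
/*
 * Illustrative sketch, not used by the driver: qla2x00_ct_iocb() seeds
 * cur_dsd with the single response descriptor in the command IOCB, and
 * each Continuation Type 1 IOCB it appends carries five more DSDs, so
 * its final entry_count follows this hypothetical formula (with
 * in_first_iocb = 1 for the function above):
 */
static inline int
example_ct_entry_count(uint16_t dsds, uint16_t in_first_iocb)
{
        int entries = 1;

        if (dsds > in_first_iocb)       /* ceil((dsds - first) / 5) */
                entries += (dsds - in_first_iocb + 4) / 5;
        return entries;
}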
2648
2649 static void
2650 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2651 {
2652         uint16_t        avail_dsds;
2653         uint32_t        *cur_dsd;
2654         struct scatterlist *sg;
2655         int index;
2656         uint16_t cmd_dsds, rsp_dsds;
2657         scsi_qla_host_t *vha = sp->vha;
2658         struct qla_hw_data *ha = vha->hw;
2659         struct bsg_job *bsg_job = sp->u.bsg_job;
2660         int entry_count = 1;
2661         cont_a64_entry_t *cont_pkt = NULL;
2662
2663         ct_iocb->entry_type = CT_IOCB_TYPE;
2664         ct_iocb->entry_status = 0;
2665         ct_iocb->sys_define = 0;
2666         ct_iocb->handle = sp->handle;
2667
2668         ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2669         ct_iocb->vp_index = sp->vha->vp_idx;
2670         ct_iocb->comp_status = cpu_to_le16(0);
2671
2672         cmd_dsds = bsg_job->request_payload.sg_cnt;
2673         rsp_dsds = bsg_job->reply_payload.sg_cnt;
2674
2675         ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
2676         ct_iocb->timeout = 0;
2677         ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
2678         ct_iocb->cmd_byte_count =
2679             cpu_to_le32(bsg_job->request_payload.payload_len);
2680
2681         avail_dsds = 2;
2682         cur_dsd = (uint32_t *)ct_iocb->dseg_0_address;
2683         index = 0;
2684
2685         for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
2686                 dma_addr_t       sle_dma;
2687
2688                 /* Allocate additional continuation packets? */
2689                 if (avail_dsds == 0) {
2690                         /*
2691                          * Five DSDs are available in the Cont.
2692                          * Type 1 IOCB.
2693                          */
2694                         cont_pkt = qla2x00_prep_cont_type1_iocb(
2695                             vha, ha->req_q_map[0]);
2696                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2697                         avail_dsds = 5;
2698                         entry_count++;
2699                 }
2700
2701                 sle_dma = sg_dma_address(sg);
2702                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2703                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2704                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2705                 avail_dsds--;
2706         }
2707
2708         index = 0;
2709
2710         for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
2711                 dma_addr_t       sle_dma;
2712
2713                 /* Allocate additional continuation packets? */
2714                 if (avail_dsds == 0) {
2715                         /*
2716                          * Five DSDs are available in the Cont.
2717                          * Type 1 IOCB.
2718                          */
2719                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2720                             ha->req_q_map[0]);
2721                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2722                         avail_dsds = 5;
2723                         entry_count++;
2724                 }
2725
2726                 sle_dma = sg_dma_address(sg);
2727                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2728                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2729                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2730                 avail_dsds--;
2731         }
2732         ct_iocb->entry_count = entry_count;
2733 }
2734
2735 /**
2736  * qla82xx_start_scsi() - Send a SCSI command to the ISP
2737  * @sp: command to send to the ISP
2738  *
2739  * Returns non-zero if a failure occurred, else zero.
2740  */
2741 int
2742 qla82xx_start_scsi(srb_t *sp)
2743 {
2744         int             nseg;
2745         unsigned long   flags;
2746         struct scsi_cmnd *cmd;
2747         uint32_t        *clr_ptr;
2748         uint32_t        index;
2749         uint32_t        handle;
2750         uint16_t        cnt;
2751         uint16_t        req_cnt;
2752         uint16_t        tot_dsds;
2753         struct device_reg_82xx __iomem *reg;
2754         uint32_t dbval;
2755         uint32_t *fcp_dl;
2756         uint8_t additional_cdb_len;
2757         struct ct6_dsd *ctx;
2758         struct scsi_qla_host *vha = sp->vha;
2759         struct qla_hw_data *ha = vha->hw;
2760         struct req_que *req = NULL;
2761         struct rsp_que *rsp = NULL;
2762
2763         /* Setup device pointers. */
2764         reg = &ha->iobase->isp82;
2765         cmd = GET_CMD_SP(sp);
2766         req = vha->req;
2767         rsp = ha->rsp_q_map[0];
2768
2769         /* So we know we haven't pci_map'ed anything yet */
2770         tot_dsds = 0;
2771
2772         dbval = 0x04 | (ha->portnum << 5);
2773
2774         /* Send marker if required */
2775         if (vha->marker_needed != 0) {
2776                 if (qla2x00_marker(vha, req,
2777                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2778                         ql_log(ql_log_warn, vha, 0x300c,
2779                             "qla2x00_marker failed for cmd=%p.\n", cmd);
2780                         return QLA_FUNCTION_FAILED;
2781                 }
2782                 vha->marker_needed = 0;
2783         }
2784
2785         /* Acquire ring specific lock */
2786         spin_lock_irqsave(&ha->hardware_lock, flags);
2787
2788         /* Check for room in outstanding command list. */
2789         handle = req->current_outstanding_cmd;
2790         for (index = 1; index < req->num_outstanding_cmds; index++) {
2791                 handle++;
2792                 if (handle == req->num_outstanding_cmds)
2793                         handle = 1;
2794                 if (!req->outstanding_cmds[handle])
2795                         break;
2796         }
2797         if (index == req->num_outstanding_cmds)
2798                 goto queuing_error;
2799
2800         /* Map the sg table so we have an accurate count of sg entries needed */
2801         if (scsi_sg_count(cmd)) {
2802                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2803                     scsi_sg_count(cmd), cmd->sc_data_direction);
2804                 if (unlikely(!nseg))
2805                         goto queuing_error;
2806         } else
2807                 nseg = 0;
2808
2809         tot_dsds = nseg;
2810
2811         if (tot_dsds > ql2xshiftctondsd) {
2812                 struct cmd_type_6 *cmd_pkt;
2813                 uint16_t more_dsd_lists = 0;
2814                 struct dsd_dma *dsd_ptr;
2815                 uint16_t i;
2816
2817                 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
2818                 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2819                         ql_dbg(ql_dbg_io, vha, 0x300d,
2820                             "Num of DSD lists %d is more than %d for cmd=%p.\n",
2821                             more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2822                             cmd);
2823                         goto queuing_error;
2824                 }
2825
2826                 if (more_dsd_lists <= ha->gbl_dsd_avail)
2827                         goto sufficient_dsds;
2828                 else
2829                         more_dsd_lists -= ha->gbl_dsd_avail;
2830
2831                 for (i = 0; i < more_dsd_lists; i++) {
2832                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2833                         if (!dsd_ptr) {
2834                                 ql_log(ql_log_fatal, vha, 0x300e,
2835                                     "Failed to allocate memory for dsd_dma "
2836                                     "for cmd=%p.\n", cmd);
2837                                 goto queuing_error;
2838                         }
2839
2840                         dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2841                                 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2842                         if (!dsd_ptr->dsd_addr) {
2843                                 kfree(dsd_ptr);
2844                                 ql_log(ql_log_fatal, vha, 0x300f,
2845                                     "Failed to allocate memory for dsd_addr "
2846                                     "for cmd=%p.\n", cmd);
2847                                 goto queuing_error;
2848                         }
2849                         list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2850                         ha->gbl_dsd_avail++;
2851                 }
2852
2853 sufficient_dsds:
2854                 req_cnt = 1;
2855
		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
				&reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
					(req->ring_index - cnt);
			if (req->cnt < (req_cnt + 2))
				goto queuing_error;
		}

		ctx = sp->u.scmd.ctx =
		    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
		if (!ctx) {
			ql_log(ql_log_fatal, vha, 0x3010,
			    "Failed to allocate ctx for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		memset(ctx, 0, sizeof(struct ct6_dsd));
		ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
			GFP_ATOMIC, &ctx->fcp_cmnd_dma);
		if (!ctx->fcp_cmnd) {
			ql_log(ql_log_fatal, vha, 0x3011,
			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		/* Initialize the DSD list and dma handle */
		INIT_LIST_HEAD(&ctx->dsd_list);
		ctx->dsd_use_cnt = 0;

		if (cmd->cmd_len > 16) {
			additional_cdb_len = cmd->cmd_len - 16;
			if ((cmd->cmd_len % 4) != 0) {
				/* SCSI CDBs longer than 16 bytes must be a
				 * multiple of 4 bytes
				 */
				ql_log(ql_log_warn, vha, 0x3012,
				    "scsi cmd len %d not multiple of 4 "
				    "for cmd=%p.\n", cmd->cmd_len, cmd);
				goto queuing_error_fcp_cmnd;
			}
			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
		} else {
			additional_cdb_len = 0;
			ctx->fcp_cmnd_len = 12 + 16 + 4;
		}

		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number. */
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->vha->vp_idx;

		/* Build IOCB segments */
		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
			goto queuing_error_fcp_cmnd;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

		/* Build FCP_CMND IU */
		memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
		int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;

		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 1;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 2;

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			ctx->fcp_cmnd->task_attribute |=
			    sp->fcport->fcp_prio << 3;

		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);

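		/*
		 * In the FCP_CMND IU the 4-byte fcp_dl (data length) field
		 * immediately follows the CDB (16 bytes plus any additional
		 * CDB bytes) and is big-endian on the wire, hence htonl();
		 * this is also why fcp_cmnd_len is sized 12 + CDB + 4.
		 */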
		fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
		    additional_cdb_len);
		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));

		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
		cmd_pkt->fcp_cmnd_dseg_address[0] =
		    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
		cmd_pkt->fcp_cmnd_dseg_address[1] =
		    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));

		sp->flags |= SRB_FCP_CMND_DMA_VALID;
		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
		/* Set total IOCB entry count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify the response queue number where
		 * completion should happen.
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	} else {
		struct cmd_type_7 *cmd_pkt;
		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
					(req->ring_index - cnt);
		}
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;

		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number. */
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->vha->vp_idx;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
		    sizeof(cmd_pkt->lun));

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			cmd_pkt->task |= sp->fcport->fcp_prio << 3;

		/* Load SCSI command packet. */
		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

		/* Build IOCB segments */
		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

		/* Set total IOCB entry count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify the response queue number where
		 * completion should happen.
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	}
	/* Record the command as outstanding and consume ring space. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set the new chip ring index. */
	/* ISP82xx doorbell: either a single qla82xx_wr_32() write (when
	 * ql2xdbwr is set) or write/read-back/verify on the doorbell
	 * register, rewriting until the hardware reflects the new value.
	 */
	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
	if (ql2xdbwr)
		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
	else {
		WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
		wmb();
		while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
			WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
			wmb();
		}
	}

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error_fcp_cmnd:
	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	if (sp->u.scmd.ctx) {
		mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
		sp->u.scmd.ctx = NULL;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

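/**
 * qla24xx_abort_iocb() - Build an Abort IOCB.
 * @sp: SRB carrying the abort request
 * @abt_iocb: Abort IOCB packet to initialize
 *
 * Fills in the handle of the command to be aborted (taken from the
 * SRB's iocb context) along with the target's N_Port identifiers.
 */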
static void
qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
{
	struct srb_iocb *aio = &sp->u.iocb_cmd;
	scsi_qla_host_t *vha = sp->vha;
	struct req_que *req = vha->req;

	memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
	abt_iocb->entry_type = ABORT_IOCB_TYPE;
	abt_iocb->entry_count = 1;
	abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
	abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	abt_iocb->handle_to_abort =
	    cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
	abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
	abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	abt_iocb->vp_index = vha->vp_idx;
	abt_iocb->req_que_no = cpu_to_le16(req->id);
	/* Ensure the IOCB is complete before the caller hands it to the
	 * firmware.
	 */
	wmb();
}

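/**
 * qla2x00_mb_iocb() - Build a Mailbox IOCB.
 * @sp: SRB carrying the mailbox command
 * @mbx: MBX IOCB packet to initialize
 *
 * Copies the SRB's mailbox-out array into the IOCB, bounded by the
 * smaller of the two arrays.
 */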
static void
qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
{
	int i, sz;

	mbx->entry_type = MBX_IOCB_TYPE;
	mbx->handle = sp->handle;
	sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));

	for (i = 0; i < sz; i++)
		mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
}

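/**
 * qla2x00_ctpthru_cmd_iocb() - Build a CT pass-through IOCB.
 * @sp: SRB carrying the CT request
 * @ct_pkt: CT IOCB packet to initialize
 */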
static void
qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
{
	sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
	qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
	ct_pkt->handle = sp->handle;
}

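/**
 * qla2x00_send_notify_ack_iocb() - Build a Notify Acknowledge IOCB.
 * @sp: SRB carrying the acknowledgement
 * @nack: Notify Ack packet to initialize
 *
 * Echoes the relevant fields of the received immediate notify back to
 * the firmware.
 */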
static void qla2x00_send_notify_ack_iocb(srb_t *sp,
	struct nack_to_isp *nack)
{
	struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;

	nack->entry_type = NOTIFY_ACK_TYPE;
	nack->entry_count = 1;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.handle = sp->handle;
	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = 0;
	nack->u.isp24.srr_reject_code = 0;
	nack->u.isp24.srr_reject_code_expl = 0;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
}

/**
 * qla_nvme_ls() - Build an NVMe Link Service (PT LS4) request IOCB.
 * @sp: SRB carrying the LS request
 * @cmd_pkt: PT LS4 request packet to initialize
 *
 * The single command and response data segments are taken from the
 * SRB's NVMe iocb context.
 */
static int
qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
{
	struct srb_iocb *nvme;
	int rval = QLA_SUCCESS;

	nvme = &sp->u.iocb_cmd;
	cmd_pkt->entry_type = PT_LS4_REQUEST;
	cmd_pkt->entry_count = 1;
	cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;

	cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	cmd_pkt->tx_dseg_count = 1;
	cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
	cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
	cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
	cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));

	cmd_pkt->rx_dseg_count = 1;
	cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
	cmd_pkt->dseg1_len = nvme->u.nvme.rsp_len;
	cmd_pkt->dseg1_address[0] = cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
	cmd_pkt->dseg1_address[1] = cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));

	return rval;
}

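/**
 * qla2x00_start_sp() - Allocate an IOCB for an SRB and dispatch it by type.
 * @sp: SRB to send
 *
 * Takes the hardware lock, allocates ring space, builds the IOCB with the
 * type-specific helper and rings the doorbell on request queue 0.
 *
 * Illustrative caller sketch (not taken from this file):
 *
 *	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 *	sp->type = SRB_LOGIN_CMD;
 *	... fill in sp->u.iocb_cmd for the chosen type ...
 *	rval = qla2x00_start_sp(sp);
 */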
int
qla2x00_start_sp(srb_t *sp)
{
	int rval;
	scsi_qla_host_t *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	void *pkt;
	unsigned long flags;

	rval = QLA_FUNCTION_FAILED;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	pkt = qla2x00_alloc_iocbs(vha, sp);
	if (!pkt) {
		ql_log(ql_log_warn, vha, 0x700c,
		    "qla2x00_alloc_iocbs failed.\n");
		goto done;
	}

	rval = QLA_SUCCESS;
	switch (sp->type) {
	case SRB_LOGIN_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
		break;
	case SRB_PRLI_CMD:
		qla24xx_prli_iocb(sp, pkt);
		break;
	case SRB_LOGOUT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
		break;
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		qla24xx_els_iocb(sp, pkt);
		break;
	case SRB_CT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_ct_iocb(sp, pkt) :
		    qla2x00_ct_iocb(sp, pkt);
		break;
	case SRB_ADISC_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_adisc_iocb(sp, pkt) :
		    qla2x00_adisc_iocb(sp, pkt);
		break;
	case SRB_TM_CMD:
		IS_QLAFX00(ha) ?
		    qlafx00_tm_iocb(sp, pkt) :
		    qla24xx_tm_iocb(sp, pkt);
		break;
	case SRB_FXIOCB_DCMD:
	case SRB_FXIOCB_BCMD:
		qlafx00_fxdisc_iocb(sp, pkt);
		break;
	case SRB_NVME_LS:
		qla_nvme_ls(sp, pkt);
		break;
	case SRB_ABT_CMD:
		IS_QLAFX00(ha) ?
		    qlafx00_abort_iocb(sp, pkt) :
		    qla24xx_abort_iocb(sp, pkt);
		break;
	case SRB_ELS_DCMD:
		qla24xx_els_logo_iocb(sp, pkt);
		break;
	case SRB_CT_PTHRU_CMD:
		qla2x00_ctpthru_cmd_iocb(sp, pkt);
		break;
	case SRB_MB_IOCB:
		qla2x00_mb_iocb(sp, pkt);
		break;
	case SRB_NACK_PLOGI:
	case SRB_NACK_PRLI:
	case SRB_NACK_LOGO:
		qla2x00_send_notify_ack_iocb(sp, pkt);
		break;
	default:
		break;
	}

	wmb();
	qla2x00_start_iocbs(vha, ha->req_q_map[0]);
done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}

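/**
 * qla25xx_build_bidir_iocb() - Build a bidirectional command IOCB.
 * @sp: SRB carrying the bsg_job
 * @vha: HA context
 * @cmd_pkt: bidirectional command packet to initialize
 * @tot_dsds: total number of data segment descriptors
 *
 * Loads the write (request) and read (reply) scatter/gather lists into
 * the IOCB and into as many Continuation Type 1 IOCBs as are needed.
 */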
static void
qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
				struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	uint32_t req_data_len = 0;
	uint32_t rsp_data_len = 0;
	struct scatterlist *sg;
	int index;
	int entry_count = 1;
	struct bsg_job *bsg_job = sp->u.bsg_job;

	/* Update entry type to indicate bidirectional command */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
		cpu_to_le32(COMMAND_BIDIRECTIONAL);

	/* Set the transfer direction: both the read and write flags are
	 * set.  BD_WRAP_BACK is set as well; the firmware takes care of
	 * assigning DID=SID for outgoing packets.
	 */
	cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
	cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
							BD_WRAP_BACK);

	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
	cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
	cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);

	vha->bidi_stats.transfer_bytes += req_data_len;
	vha->bidi_stats.io_count++;

	vha->qla_stats.output_bytes += req_data_len;
	vha->qla_stats.output_requests++;

	/* Only one DSD is available in the bidirectional IOCB; the
	 * remaining DSDs are bundled in Continuation Type 1 IOCBs.
	 */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;

	index = 0;

	for_each_sg(bsg_job->request_payload.sg_list, sg,
				bsg_job->request_payload.sg_cnt, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* A Continuation Type 1 IOCB can accommodate
			 * 5 DSDs
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* Read-request DSDs always follow the write DSDs, going to the
	 * continuation IOCBs.  If there is room in the current IOCB they
	 * are added there; otherwise a new continuation IOCB is allocated.
	 */
	for_each_sg(bsg_job->reply_payload.sg_list, sg,
				bsg_job->reply_payload.sg_cnt, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* A Continuation Type 1 IOCB can accommodate
			 * 5 DSDs
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* This must match the number of IOCBs used for this command */
	cmd_pkt->entry_count = entry_count;
}

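/**
 * qla2x00_start_bidir() - Queue a bidirectional command to the firmware.
 * @sp: SRB carrying the bsg_job
 * @vha: HA context
 * @tot_dsds: total number of data segment descriptors
 *
 * Returns EXT_STATUS_OK on success, EXT_STATUS_BUSY when no handle or
 * ring space is available, or EXT_STATUS_MAILBOX if the marker IOCB
 * could not be sent.
 */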
int
qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	uint32_t handle;
	uint32_t index;
	uint16_t req_cnt;
	uint16_t cnt;
	uint32_t *clr_ptr;
	struct cmd_bidir *cmd_pkt = NULL;
	struct rsp_que *rsp;
	struct req_que *req;
	int rval = EXT_STATUS_OK;

	rsp = ha->rsp_q_map[0];
	req = vha->req;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req,
			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
			return EXT_STATUS_MAILBOX;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}

	/* Calculate the number of IOCBs required */
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

	/* Check for room on request queue. */
	if (req->cnt < req_cnt + 2) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
	}
	if (req->cnt < req_cnt + 2) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}

	cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID (of vha) */
	cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
	cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
	cmd_pkt->port_id[1] = vha->d_id.b.area;
	cmd_pkt->port_id[2] = vha->d_id.b.domain;

	qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	/* Record the command as outstanding and consume ring space. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	/* Send the command to the firmware */
	wmb();
	qla2x00_start_iocbs(vha, req);
queuing_error:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}