/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
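
/*
 * For example, dsds == 12 needs the command IOCB (3 DSDs) plus
 * ceil((12 - 3) / 7) == 2 continuation IOCBs, so 3 entries in total.
 */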

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
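
/*
 * Likewise, dsds == 12 needs the command IOCB (2 DSDs) plus
 * ceil((12 - 2) / 5) == 2 continuation IOCBs, so 3 entries in total.
 */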

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
	    cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
	    cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}

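/**
 * qla24xx_configure_prot_mode() - Translate the command's SCSI protection
 * operation into firmware protection options.
 * @sp: SRB command to process
 * @fw_prot_opts: PO_MODE_DIF_* firmware protection options to fill in
 *
 * Returns the number of protection scatter/gather entries for the command.
 */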
inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t guard = scsi_host_get_guard(cmd->device->host);

	/* We always use DIF bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (guard & SHOST_DIX_GUARD_IP)
			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
		else
			*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		nseg;
	unsigned long	flags;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	cmd_entry_t	*cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	vha = sp->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
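	/*
	 * Handles are 1-based: the search below wraps from
	 * num_outstanding_cmds - 1 back to 1, so slot 0 is never used.
	 */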
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
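	/*
	 * req->cnt caches the free request-queue entries.  When it looks
	 * too small, recompute it from the hardware out pointer; the two
	 * entries of extra headroom keep ring_index from catching up with
	 * the out pointer.
	 */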
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
	cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);

	if (IS_P3P_TYPE(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA27XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
		} else if (IS_QLA83XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
		} else if (IS_QLAFX00(ha)) {
			WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
				req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
			struct rsp_que *rsp, uint16_t loop_id,
			uint64_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24 = NULL;

	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	req = ha->req_q_map[0];
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16((uint16_t)lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
		struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
		uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue a marker IOCB.
 * The caller CAN hold the hardware lock, as indicated by the ha_locked
 * parameter.  The lock might be released and then reacquired.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
					MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
					MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}

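/**
 * qla24xx_build_scsi_type_6_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 * @sp: SRB command to process
 * @cmd_pkt: Command type 6 IOCB
 * @tot_dsds: Total number of segments to transfer
 *
 * Unlike the other IOCB builders, Command Type 6 keeps its data segment
 * descriptors in externally allocated DSD lists (drawn from the global
 * ha->gbl_dsd_list pool) rather than in continuation IOCBs.
 */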
static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
	uint16_t tot_dsds)
{
	uint32_t *cur_dsd = NULL;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint32_t *dsd_seg;
	void *next_dsd;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 6 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return 0;
	}

	vha = sp->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	cur_seg = scsi_sglist(cmd);
	ctx = GET_CMD_CTX_SP(sp);

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
		} else {
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(dsd_list_len);
		}
		cur_dsd = (uint32_t *)next_dsd;
		while (avail_dsds) {
			dma_addr_t	sle_dma;

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
	return 0;
}

/**
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds / QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}
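
/*
 * For example, if QLA_DSDS_PER_IOCB is 37, dsds == 80 needs
 * 80 / 37 == 2 full lists plus one more for the 6-descriptor
 * remainder: 3 DSD lists.
 */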

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
	uint16_t tot_dsds, struct req_que *req)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask */
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command.
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
								0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}
}

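/**
 * qla24xx_get_one_block_sg() - Carve the next protection-interval-sized
 * chunk out of the data scatter/gather list.
 * @blk_sz: protection interval (logical block size) in bytes
 * @sgx: walk state, advanced on each call
 * @partial: set to 1 if the chunk ends mid-block, 0 on a block boundary
 *
 * Returns 0 once all bytes have been consumed, 1 otherwise.
 *
 * Example: with blk_sz == 512 and two 300-byte SG elements, the first
 * call yields a 300-byte partial chunk, the second a 212-byte chunk that
 * completes the block (*partial == 0), and the third the remaining
 * 88 bytes of the second element.
 */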
int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
	uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}

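/**
 * qla24xx_walk_and_build_sglist_no_difb() - Build DSD lists for the data
 * scatter/gather list, interleaving an 8-byte protection DSD after each
 * full protection interval (used when DIF bundling is not in effect).
 * @ha: HA context
 * @sp: SRB command to process; NULL when @tc supplies the buffers
 * @dsd: first DSD slot in the command IOCB
 * @tot_dsds: number of data segment descriptors
 * @tc: alternate buffer description, used when @sp is NULL
 *
 * Returns 0 on success, 1 on allocation failure.
 */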
int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	uint32_t *cur_dsd = dsd;
	uint16_t	used_dsds = tot_dsds;
	uint32_t	prot_int; /* protection interval */
	uint32_t	partial;
	struct qla2_sgx sgx;
	dma_addr_t	sle_dma;
	uint32_t	sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	if (sp) {
		cmd = GET_CMD_SP(sp);
		prot_int = cmd->device->sector_size;

		sgx.tot_bytes = scsi_bufflen(cmd);
		sgx.cur_sg = scsi_sglist(cmd);
		sgx.sp = sp;

		sg_prot = scsi_prot_sglist(cmd);
	} else if (tc) {
		prot_int      = tc->blk_sz;
		sgx.tot_bytes = tc->bufflen;
		sgx.cur_sg    = tc->sg;
		sg_prot       = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sle_dma_len);
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

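/**
 * qla24xx_walk_and_build_sglist() - Build DSD lists for the data
 * scatter/gather list.
 * @ha: HA context
 * @sp: SRB command to process; NULL when @tc supplies the buffers
 * @dsd: first DSD slot in the command IOCB
 * @tot_dsds: number of data segment descriptors
 * @tc: alternate buffer description, used when @sp is NULL
 *
 * Returns 0 on success, 1 on allocation failure.
 */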
int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
	uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	uint32_t *cur_dsd = dsd;
	int	i;
	uint16_t	used_dsds = tot_dsds;
	struct scsi_cmnd *cmd;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_sglist(cmd);
	} else if (tc) {
		sgl = tc->sg;
	} else {
		BUG();
		return 1;
	}

	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

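/**
 * qla24xx_walk_and_build_prot_sglist() - Build DSD lists for the
 * protection scatter/gather list.
 * @ha: HA context
 * @sp: SRB command to process; NULL when @tc supplies the buffers
 * @dsd: first DSD slot in the command IOCB
 * @tot_dsds: number of protection data segment descriptors
 * @tc: alternate buffer description, used when @sp is NULL
 *
 * Returns 0 on success, 1 on allocation failure.
 */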
int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	int	i;
	struct scsi_cmnd *cmd;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	struct scsi_qla_host *vha;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_prot_sglist(cmd);
		vha = sp->vha;
	} else if (tc) {
		vha = tc->vha;
		sgl = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe021,
		"%s: enter\n", __func__);

	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
						QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

		avail_dsds--;
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 *                                    Type CRC_2 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of protection segments to transfer
 * @fw_prot_opts: Protection options to pass to the firmware
 */
inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	uint32_t		*cur_dsd, *fcp_dl;
	scsi_qla_host_t		*vha;
	struct scsi_cmnd	*cmd;
	uint32_t		total_bytes = 0;
	uint32_t		data_bytes;
	uint32_t		dif_bytes;
	uint8_t			bundling = 1;
	uint16_t		blk_size;
	struct crc_context	*crc_ctx_pkt = NULL;
	struct qla_hw_data	*ha;
	uint8_t			additional_fcpcdb_len;
	uint16_t		fcp_cmnd_len;
	struct fcp_cmnd		*fcp_cmnd;
	dma_addr_t		crc_ctx_dma;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);

	vha = sp->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->vha->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.ctx =
	    dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
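	/*
	 * FCP_CMND layout: 8-byte LUN plus 4 control bytes (12), the CDB
	 * (16 bytes plus any additional CDB bytes, a 4-byte multiple), and
	 * the 4-byte FCP_DL field.
	 */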
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	fcp_cmnd->task_management = 0;
	fcp_cmnd->task_attribute = TSK_SIMPLE;

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/*
	 * Compute DIF length and adjust data length to include protection
	 * (8 bytes of DIF per logical block).
	 */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		BUG();
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
			SCSI_PROT_DIF_TYPE2))
			fw_prot_opts |= BIT_10;
		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
		    SCSI_PROT_DIF_TYPE3)
			fw_prot_opts |= BIT_11;
	}

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure bundling if we need to fetch the interleaving
		 * protection data with separate PCI accesses.
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
							tot_prot_dsds);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	/* Walk data segments */
	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
			cur_dsd, tot_dsds, NULL))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
			(tot_dsds - tot_prot_dsds), NULL))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walk DIF segments */
		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
				tot_prot_dsds, NULL))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
1406 int
1407 qla24xx_start_scsi(srb_t *sp)
1408 {
1409         int             nseg;
1410         unsigned long   flags;
1411         uint32_t        *clr_ptr;
1412         uint32_t        index;
1413         uint32_t        handle;
1414         struct cmd_type_7 *cmd_pkt;
1415         uint16_t        cnt;
1416         uint16_t        req_cnt;
1417         uint16_t        tot_dsds;
1418         struct req_que *req = NULL;
1419         struct rsp_que *rsp = NULL;
1420         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1421         struct scsi_qla_host *vha = sp->vha;
1422         struct qla_hw_data *ha = vha->hw;
1423
1424         /* Setup device pointers. */
1425         req = vha->req;
1426         rsp = req->rsp;
1427
1428         /* So we know we haven't pci_map'ed anything yet */
1429         tot_dsds = 0;
1430
1431         /* Send marker if required */
1432         if (vha->marker_needed != 0) {
1433                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1434                     QLA_SUCCESS)
1435                         return QLA_FUNCTION_FAILED;
1436                 vha->marker_needed = 0;
1437         }
1438
1439         /* Acquire ring specific lock */
1440         spin_lock_irqsave(&ha->hardware_lock, flags);
1441
1442         /* Check for room in outstanding command list. */
1443         handle = req->current_outstanding_cmd;
1444         for (index = 1; index < req->num_outstanding_cmds; index++) {
1445                 handle++;
1446                 if (handle == req->num_outstanding_cmds)
1447                         handle = 1;
1448                 if (!req->outstanding_cmds[handle])
1449                         break;
1450         }
1451         if (index == req->num_outstanding_cmds)
1452                 goto queuing_error;
1453
1454         /* Map the sg table so we have an accurate count of sg entries needed */
1455         if (scsi_sg_count(cmd)) {
1456                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1457                     scsi_sg_count(cmd), cmd->sc_data_direction);
1458                 if (unlikely(!nseg))
1459                         goto queuing_error;
1460         } else
1461                 nseg = 0;
1462
1463         tot_dsds = nseg;
1464         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1465         if (req->cnt < (req_cnt + 2)) {
1466                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1467                     RD_REG_DWORD_RELAXED(req->req_q_out);
1468                 if (req->ring_index < cnt)
1469                         req->cnt = cnt - req->ring_index;
1470                 else
1471                         req->cnt = req->length -
1472                                 (req->ring_index - cnt);
1473                 if (req->cnt < (req_cnt + 2))
1474                         goto queuing_error;
1475         }
1476
1477         /* Build command packet. */
1478         req->current_outstanding_cmd = handle;
1479         req->outstanding_cmds[handle] = sp;
1480         sp->handle = handle;
1481         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1482         req->cnt -= req_cnt;
1483
1484         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1485         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1486
1487         /* Zero out remaining portion of packet. */
1488         /* Tagged queuing modifier -- default is TSK_SIMPLE (0). */
1489         clr_ptr = (uint32_t *)cmd_pkt + 2;
1490         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
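        /*
         * Clearing starts at dword 2 so the handle written just above (and
         * whatever sits in the header dword) survives; the cleared fields
         * are filled back in below.
         */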
1491         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1492
1493         /* Set NPORT-ID and LUN number */
1494         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1495         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1496         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1497         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1498         cmd_pkt->vp_index = sp->vha->vp_idx;
1499
1500         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1501         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1502
1503         cmd_pkt->task = TSK_SIMPLE;
1504
1505         /* Load SCSI command packet. */
1506         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1507         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
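        /*
         * The request queue itself is little-endian, but the FCP_CMND
         * fields go out on the wire big-endian -- hence the swap of the
         * LUN above and of the CDB here.
         */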
1508
1509         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1510
1511         /* Build IOCB segments */
1512         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1513
1514         /* Set total data segment count. */
1515         cmd_pkt->entry_count = (uint8_t)req_cnt;
1516         wmb();
1517         /* Adjust ring index. */
1518         req->ring_index++;
1519         if (req->ring_index == req->length) {
1520                 req->ring_index = 0;
1521                 req->ring_ptr = req->ring;
1522         } else
1523                 req->ring_ptr++;
1524
1525         sp->flags |= SRB_DMA_VALID;
1526
1527         /* Set chip new ring index. */
1528         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1529
1530         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1531         return QLA_SUCCESS;
1532
1533 queuing_error:
1534         if (tot_dsds)
1535                 scsi_dma_unmap(cmd);
1536
1537         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1538
1539         return QLA_FUNCTION_FAILED;
1540 }
1541
1542 /**
1543  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1544  * @sp: command to send to the ISP
1545  *
1546  * Returns non-zero if a failure occurred, else zero.
1547  */
1548 int
1549 qla24xx_dif_start_scsi(srb_t *sp)
1550 {
1551         int                     nseg;
1552         unsigned long           flags;
1553         uint32_t                *clr_ptr;
1554         uint32_t                index;
1555         uint32_t                handle;
1556         uint16_t                cnt;
1557         uint16_t                req_cnt = 0;
1558         uint16_t                tot_dsds;
1559         uint16_t                tot_prot_dsds;
1560         uint16_t                fw_prot_opts = 0;
1561         struct req_que          *req = NULL;
1562         struct rsp_que          *rsp = NULL;
1563         struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1564         struct scsi_qla_host    *vha = sp->vha;
1565         struct qla_hw_data      *ha = vha->hw;
1566         struct cmd_type_crc_2   *cmd_pkt;
1567         uint32_t                status = 0;
1568
1569 #define QDSS_GOT_Q_SPACE        BIT_0
1570
1571         /* Only process protection commands or CDBs >16 bytes here */
1572         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1573                 if (cmd->cmd_len <= 16)
1574                         return qla24xx_start_scsi(sp);
1575         }
1576
1577         /* Setup device pointers. */
1578         req = vha->req;
1579         rsp = req->rsp;
1580
1581         /* So we know we haven't pci_map'ed anything yet */
1582         tot_dsds = 0;
1583
1584         /* Send marker if required */
1585         if (vha->marker_needed != 0) {
1586                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1587                     QLA_SUCCESS)
1588                         return QLA_FUNCTION_FAILED;
1589                 vha->marker_needed = 0;
1590         }
1591
1592         /* Acquire ring specific lock */
1593         spin_lock_irqsave(&ha->hardware_lock, flags);
1594
1595         /* Check for room in outstanding command list. */
1596         handle = req->current_outstanding_cmd;
1597         for (index = 1; index < req->num_outstanding_cmds; index++) {
1598                 handle++;
1599                 if (handle == req->num_outstanding_cmds)
1600                         handle = 1;
1601                 if (!req->outstanding_cmds[handle])
1602                         break;
1603         }
1604
1605         if (index == req->num_outstanding_cmds)
1606                 goto queuing_error;
1607
1608         /* Compute number of required data segments */
1609         /* Map the sg table so we have an accurate count of sg entries needed */
1610         if (scsi_sg_count(cmd)) {
1611                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1612                     scsi_sg_count(cmd), cmd->sc_data_direction);
1613                 if (unlikely(!nseg))
1614                         goto queuing_error;
1615                 else
1616                         sp->flags |= SRB_DMA_VALID;
1617
1618                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1619                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1620                         struct qla2_sgx sgx;
1621                         uint32_t        partial;
1622
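                        /*
                         * When the HBA inserts or strips the protection
                         * interval itself, the transfer apparently has to
                         * be carved into one descriptor per logical block,
                         * so walk the sg list in sector_size chunks to get
                         * the real DSD count.
                         */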
1623                         memset(&sgx, 0, sizeof(struct qla2_sgx));
1624                         sgx.tot_bytes = scsi_bufflen(cmd);
1625                         sgx.cur_sg = scsi_sglist(cmd);
1626                         sgx.sp = sp;
1627
1628                         nseg = 0;
1629                         while (qla24xx_get_one_block_sg(
1630                             cmd->device->sector_size, &sgx, &partial))
1631                                 nseg++;
1632                 }
1633         } else
1634                 nseg = 0;
1635
1636         /* number of required data segments */
1637         tot_dsds = nseg;
1638
1639         /* Compute number of required protection segments */
1640         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1641                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1642                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1643                 if (unlikely(!nseg))
1644                         goto queuing_error;
1645                 else
1646                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
1647
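                /*
                 * For insert/strip operations the count that matters is
                 * one protection descriptor per logical block, so the
                 * mapped sg count is overridden below with
                 * bufflen / sector_size.
                 */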
1648                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1649                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1650                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1651                 }
1652         } else {
1653                 nseg = 0;
1654         }
1655
1656         req_cnt = 1;
1657         /* Total Data and protection sg segment(s) */
1658         tot_prot_dsds = nseg;
1659         tot_dsds += nseg;
1660         if (req->cnt < (req_cnt + 2)) {
1661                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1662                     RD_REG_DWORD_RELAXED(req->req_q_out);
1663                 if (req->ring_index < cnt)
1664                         req->cnt = cnt - req->ring_index;
1665                 else
1666                         req->cnt = req->length -
1667                                 (req->ring_index - cnt);
1668                 if (req->cnt < (req_cnt + 2))
1669                         goto queuing_error;
1670         }
1671
1672         status |= QDSS_GOT_Q_SPACE;
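        /*
         * From here on, ring slots (and shortly the handle) are claimed;
         * this flag lets the queuing_error path give them back.
         */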
1673
1674         /* Build header part of command packet (excluding the OPCODE). */
1675         req->current_outstanding_cmd = handle;
1676         req->outstanding_cmds[handle] = sp;
1677         sp->handle = handle;
1678         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1679         req->cnt -= req_cnt;
1680
1681         /* Fill-in common area */
1682         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1683         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1684
1685         clr_ptr = (uint32_t *)cmd_pkt + 2;
1686         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1687
1688         /* Set NPORT-ID and LUN number */
1689         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1690         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1691         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1692         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1693
1694         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1695         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1696
1697         /* Total Data and protection segment(s) */
1698         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1699
1700         /* Build IOCB segments and adjust for data protection segments */
1701         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1702             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1703                 QLA_SUCCESS)
1704                 goto queuing_error;
1705
1706         cmd_pkt->entry_count = (uint8_t)req_cnt;
1707         /* Specify response queue number where completion should happen */
1708         cmd_pkt->entry_status = (uint8_t) rsp->id;
1709         cmd_pkt->timeout = cpu_to_le16(0);
1710         wmb();
1711
1712         /* Adjust ring index. */
1713         req->ring_index++;
1714         if (req->ring_index == req->length) {
1715                 req->ring_index = 0;
1716                 req->ring_ptr = req->ring;
1717         } else
1718                 req->ring_ptr++;
1719
1720         /* Set chip new ring index. */
1721         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1722
1723         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1724
1725         return QLA_SUCCESS;
1726
1727 queuing_error:
1728         if (status & QDSS_GOT_Q_SPACE) {
1729                 req->outstanding_cmds[handle] = NULL;
1730                 req->cnt += req_cnt;
1731         }
1732         /* Cleanup will be performed by the caller (queuecommand) */
1733
1734         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1735         return QLA_FUNCTION_FAILED;
1736 }
1737
1738 /**
1739  * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1740  * @sp: command to send to the ISP
1741  *
1742  * Returns non-zero if a failure occurred, else zero.
1743  */
1744 static int
1745 qla2xxx_start_scsi_mq(srb_t *sp)
1746 {
1747         int             nseg;
1748         unsigned long   flags;
1749         uint32_t        *clr_ptr;
1750         uint32_t        index;
1751         uint32_t        handle;
1752         struct cmd_type_7 *cmd_pkt;
1753         uint16_t        cnt;
1754         uint16_t        req_cnt;
1755         uint16_t        tot_dsds;
1756         struct req_que *req = NULL;
1757         struct rsp_que *rsp = NULL;
1758         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1759         struct scsi_qla_host *vha = sp->fcport->vha;
1760         struct qla_hw_data *ha = vha->hw;
1761         struct qla_qpair *qpair = sp->qpair;
1762
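        /*
         * Unlike qla24xx_start_scsi(), this path serializes on the qpair's
         * own lock and posts to the qpair's request queue, so multiple
         * hardware queues can submit in parallel.
         */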
1763         /* Acquire qpair specific lock */
1764         spin_lock_irqsave(&qpair->qp_lock, flags);
1765
1766         /* Setup qpair pointers */
1767         rsp = qpair->rsp;
1768         req = qpair->req;
1769
1770         /* So we know we haven't pci_map'ed anything yet */
1771         tot_dsds = 0;
1772
1773         /* Send marker if required */
1774         if (vha->marker_needed != 0) {
1775                 if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1776                     QLA_SUCCESS) {
1777                         spin_unlock_irqrestore(&qpair->qp_lock, flags);
1778                         return QLA_FUNCTION_FAILED;
1779                 }
1780                 vha->marker_needed = 0;
1781         }
1782
1783         /* Check for room in outstanding command list. */
1784         handle = req->current_outstanding_cmd;
1785         for (index = 1; index < req->num_outstanding_cmds; index++) {
1786                 handle++;
1787                 if (handle == req->num_outstanding_cmds)
1788                         handle = 1;
1789                 if (!req->outstanding_cmds[handle])
1790                         break;
1791         }
1792         if (index == req->num_outstanding_cmds)
1793                 goto queuing_error;
1794
1795         /* Map the sg table so we have an accurate count of sg entries needed */
1796         if (scsi_sg_count(cmd)) {
1797                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1798                     scsi_sg_count(cmd), cmd->sc_data_direction);
1799                 if (unlikely(!nseg))
1800                         goto queuing_error;
1801         } else
1802                 nseg = 0;
1803
1804         tot_dsds = nseg;
1805         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1806         if (req->cnt < (req_cnt + 2)) {
1807                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1808                     RD_REG_DWORD_RELAXED(req->req_q_out);
1809                 if (req->ring_index < cnt)
1810                         req->cnt = cnt - req->ring_index;
1811                 else
1812                         req->cnt = req->length -
1813                                 (req->ring_index - cnt);
1814                 if (req->cnt < (req_cnt + 2))
1815                         goto queuing_error;
1816         }
1817
1818         /* Build command packet. */
1819         req->current_outstanding_cmd = handle;
1820         req->outstanding_cmds[handle] = sp;
1821         sp->handle = handle;
1822         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1823         req->cnt -= req_cnt;
1824
1825         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1826         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1827
1828         /* Zero out remaining portion of packet. */
1829         /* Tagged queuing modifier -- default is TSK_SIMPLE (0). */
1830         clr_ptr = (uint32_t *)cmd_pkt + 2;
1831         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1832         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1833
1834         /* Set NPORT-ID and LUN number */
1835         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1836         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1837         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1838         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1839         cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1840
1841         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1842         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1843
1844         cmd_pkt->task = TSK_SIMPLE;
1845
1846         /* Load SCSI command packet. */
1847         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1848         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1849
1850         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1851
1852         /* Build IOCB segments */
1853         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1854
1855         /* Set total data segment count. */
1856         cmd_pkt->entry_count = (uint8_t)req_cnt;
1857         wmb();
1858         /* Adjust ring index. */
1859         req->ring_index++;
1860         if (req->ring_index == req->length) {
1861                 req->ring_index = 0;
1862                 req->ring_ptr = req->ring;
1863         } else
1864                 req->ring_ptr++;
1865
1866         sp->flags |= SRB_DMA_VALID;
1867
1868         /* Set chip new ring index. */
1869         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1870
1871         spin_unlock_irqrestore(&qpair->qp_lock, flags);
1872         return QLA_SUCCESS;
1873
1874 queuing_error:
1875         if (tot_dsds)
1876                 scsi_dma_unmap(cmd);
1877
1878         spin_unlock_irqrestore(&qpair->qp_lock, flags);
1879
1880         return QLA_FUNCTION_FAILED;
1881 }
1882
1884 /**
1885  * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
1886  * @sp: command to send to the ISP
1887  *
1888  * Returns non-zero if a failure occurred, else zero.
1889  */
1890 int
1891 qla2xxx_dif_start_scsi_mq(srb_t *sp)
1892 {
1893         int                     nseg;
1894         unsigned long           flags;
1895         uint32_t                *clr_ptr;
1896         uint32_t                index;
1897         uint32_t                handle;
1898         uint16_t                cnt;
1899         uint16_t                req_cnt = 0;
1900         uint16_t                tot_dsds;
1901         uint16_t                tot_prot_dsds;
1902         uint16_t                fw_prot_opts = 0;
1903         struct req_que          *req = NULL;
1904         struct rsp_que          *rsp = NULL;
1905         struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1906         struct scsi_qla_host    *vha = sp->fcport->vha;
1907         struct qla_hw_data      *ha = vha->hw;
1908         struct cmd_type_crc_2   *cmd_pkt;
1909         uint32_t                status = 0;
1910         struct qla_qpair        *qpair = sp->qpair;
1911
1912 #define QDSS_GOT_Q_SPACE        BIT_0
1913
1914         /* Check for host side state */
1915         if (!qpair->online) {
1916                 cmd->result = DID_NO_CONNECT << 16;
1917                 return QLA_INTERFACE_ERROR;
1918         }
1919
1920         if (!qpair->difdix_supported &&
1921                 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
1922                 cmd->result = DID_NO_CONNECT << 16;
1923                 return QLA_INTERFACE_ERROR;
1924         }
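        /*
         * No silent fallback here: a protected command on a qpair that
         * cannot do DIF/DIX is completed as DID_NO_CONNECT right away.
         */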
1925
1926         /* Only process protection commands or CDBs >16 bytes here */
1927         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1928                 if (cmd->cmd_len <= 16)
1929                         return qla2xxx_start_scsi_mq(sp);
1930         }
1931
1932         spin_lock_irqsave(&qpair->qp_lock, flags);
1933
1934         /* Setup qpair pointers */
1935         rsp = qpair->rsp;
1936         req = qpair->req;
1937
1938         /* So we know we haven't pci_map'ed anything yet */
1939         tot_dsds = 0;
1940
1941         /* Send marker if required */
1942         if (vha->marker_needed != 0) {
1943                 if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1944                     QLA_SUCCESS) {
1945                         spin_unlock_irqrestore(&qpair->qp_lock, flags);
1946                         return QLA_FUNCTION_FAILED;
1947                 }
1948                 vha->marker_needed = 0;
1949         }
1950
1951         /* Check for room in outstanding command list. */
1952         handle = req->current_outstanding_cmd;
1953         for (index = 1; index < req->num_outstanding_cmds; index++) {
1954                 handle++;
1955                 if (handle == req->num_outstanding_cmds)
1956                         handle = 1;
1957                 if (!req->outstanding_cmds[handle])
1958                         break;
1959         }
1960
1961         if (index == req->num_outstanding_cmds)
1962                 goto queuing_error;
1963
1964         /* Compute number of required data segments */
1965         /* Map the sg table so we have an accurate count of sg entries needed */
1966         if (scsi_sg_count(cmd)) {
1967                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1968                     scsi_sg_count(cmd), cmd->sc_data_direction);
1969                 if (unlikely(!nseg))
1970                         goto queuing_error;
1971                 else
1972                         sp->flags |= SRB_DMA_VALID;
1973
1974                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1975                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1976                         struct qla2_sgx sgx;
1977                         uint32_t        partial;
1978
1979                         memset(&sgx, 0, sizeof(struct qla2_sgx));
1980                         sgx.tot_bytes = scsi_bufflen(cmd);
1981                         sgx.cur_sg = scsi_sglist(cmd);
1982                         sgx.sp = sp;
1983
1984                         nseg = 0;
1985                         while (qla24xx_get_one_block_sg(
1986                             cmd->device->sector_size, &sgx, &partial))
1987                                 nseg++;
1988                 }
1989         } else
1990                 nseg = 0;
1991
1992         /* number of required data segments */
1993         tot_dsds = nseg;
1994
1995         /* Compute number of required protection segments */
1996         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1997                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1998                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1999                 if (unlikely(!nseg))
2000                         goto queuing_error;
2001                 else
2002                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
2003
2004                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2005                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2006                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2007                 }
2008         } else {
2009                 nseg = 0;
2010         }
2011
2012         req_cnt = 1;
2013         /* Total Data and protection sg segment(s) */
2014         tot_prot_dsds = nseg;
2015         tot_dsds += nseg;
2016         if (req->cnt < (req_cnt + 2)) {
2017                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2018                     RD_REG_DWORD_RELAXED(req->req_q_out);
2019                 if (req->ring_index < cnt)
2020                         req->cnt = cnt - req->ring_index;
2021                 else
2022                         req->cnt = req->length -
2023                                 (req->ring_index - cnt);
2024                 if (req->cnt < (req_cnt + 2))
2025                         goto queuing_error;
2026         }
2027
2028         status |= QDSS_GOT_Q_SPACE;
2029
2030         /* Build header part of command packet (excluding the OPCODE). */
2031         req->current_outstanding_cmd = handle;
2032         req->outstanding_cmds[handle] = sp;
2033         sp->handle = handle;
2034         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2035         req->cnt -= req_cnt;
2036
2037         /* Fill-in common area */
2038         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2039         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2040
2041         clr_ptr = (uint32_t *)cmd_pkt + 2;
2042         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2043
2044         /* Set NPORT-ID and LUN number */
2045         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2046         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2047         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2048         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2049
2050         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2051         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2052
2053         /* Total Data and protection segment(s) */
2054         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2055
2056         /* Build IOCB segments and adjust for data protection segments */
2057         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2058             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2059                 QLA_SUCCESS)
2060                 goto queuing_error;
2061
2062         cmd_pkt->entry_count = (uint8_t)req_cnt;
2063         cmd_pkt->timeout = cpu_to_le16(0);
2064         wmb();
2065
2066         /* Adjust ring index. */
2067         req->ring_index++;
2068         if (req->ring_index == req->length) {
2069                 req->ring_index = 0;
2070                 req->ring_ptr = req->ring;
2071         } else
2072                 req->ring_ptr++;
2073
2074         /* Set chip new ring index. */
2075         WRT_REG_DWORD(req->req_q_in, req->ring_index);
2076
2077         /* Manage unprocessed RIO/ZIO commands in response queue. */
2078         if (vha->flags.process_response_queue &&
2079             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2080                 qla24xx_process_response_queue(vha, rsp);
2081
2082         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2083
2084         return QLA_SUCCESS;
2085
2086 queuing_error:
2087         if (status & QDSS_GOT_Q_SPACE) {
2088                 req->outstanding_cmds[handle] = NULL;
2089                 req->cnt += req_cnt;
2090         }
2091         /* Cleanup will be performed by the caller (queuecommand) */
2092
2093         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2094         return QLA_FUNCTION_FAILED;
2095 }
2096
2097 /* Generic Control-SRB manipulation functions. */
2098
2099 /* hardware_lock assumed to be held. */
2100
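/*
 * Rough usage sketch (illustrative only -- the exact call sequence is the
 * caller's business): reserve a single control entry, fill in the
 * type-specific fields, then kick the request queue.
 *
 *      pkt = __qla2x00_alloc_iocbs(qpair, sp);
 *      if (!pkt)
 *              return QLA_FUNCTION_FAILED;
 *      qla24xx_login_iocb(sp, (struct logio_entry_24xx *)pkt);
 *      wmb();
 *      qla2x00_start_iocbs(vha, qpair->req);
 */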
2101 void *
2102 __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
2103 {
2104         scsi_qla_host_t *vha = qpair->vha;
2105         struct qla_hw_data *ha = vha->hw;
2106         struct req_que *req = qpair->req;
2107         device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2108         uint32_t index, handle;
2109         request_t *pkt;
2110         uint16_t cnt, req_cnt;
2111
2112         pkt = NULL;
2113         req_cnt = 1;
2114         handle = 0;
2115
2116         if (sp && (sp->type != SRB_SCSI_CMD)) {
2117                 /* Adjust entry-counts as needed. */
2118                 req_cnt = sp->iocbs;
2119         }
2120
2121         /* Check for room on request queue. */
2122         if (req->cnt < req_cnt + 2) {
2123                 if (qpair->use_shadow_reg)
2124                         cnt = *req->out_ptr;
2125                 else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
2126                         cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
2127                 else if (IS_P3P_TYPE(ha))
2128                         cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
2129                 else if (IS_FWI2_CAPABLE(ha))
2130                         cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
2131                 else if (IS_QLAFX00(ha))
2132                         cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
2133                 else
2134                         cnt = qla2x00_debounce_register(
2135                             ISP_REQ_Q_OUT(ha, &reg->isp));
2136
2137                 if  (req->ring_index < cnt)
2138                         req->cnt = cnt - req->ring_index;
2139                 else
2140                         req->cnt = req->length -
2141                             (req->ring_index - cnt);
2142         }
2143         if (req->cnt < req_cnt + 2)
2144                 goto queuing_error;
2145
2146         if (sp) {
2147                 /* Check for room in outstanding command list. */
2148                 handle = req->current_outstanding_cmd;
2149                 for (index = 1; index < req->num_outstanding_cmds; index++) {
2150                         handle++;
2151                         if (handle == req->num_outstanding_cmds)
2152                                 handle = 1;
2153                         if (!req->outstanding_cmds[handle])
2154                                 break;
2155                 }
2156                 if (index == req->num_outstanding_cmds) {
2157                         ql_log(ql_log_warn, vha, 0x700b,
2158                             "No room on outstanding cmd array.\n");
2159                         goto queuing_error;
2160                 }
2161
2162                 /* Prep command array. */
2163                 req->current_outstanding_cmd = handle;
2164                 req->outstanding_cmds[handle] = sp;
2165                 sp->handle = handle;
2166         }
2167
2168         /* Prep packet */
2169         req->cnt -= req_cnt;
2170         pkt = req->ring_ptr;
2171         memset(pkt, 0, REQUEST_ENTRY_SIZE);
2172         if (IS_QLAFX00(ha)) {
2173                 WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
2174                 WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
2175         } else {
2176                 pkt->entry_count = req_cnt;
2177                 pkt->handle = handle;
2178         }
2179
2180         return pkt;
2181
2182 queuing_error:
2183         qpair->tgt_counters.num_alloc_iocb_failed++;
2184         return pkt;
2185 }
2186
2187 void *
2188 qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2189 {
2190         scsi_qla_host_t *vha = qpair->vha;
2191
2192         if (qla2x00_reset_active(vha))
2193                 return NULL;
2194
2195         return __qla2x00_alloc_iocbs(qpair, sp);
2196 }
2197
2198 void *
2199 qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2200 {
2201         return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
2202 }
2203
2204 static void
2205 qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2206 {
2207         struct srb_iocb *lio = &sp->u.iocb_cmd;
2208
2209         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2210         logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2211         if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI)
2212                 logio->control_flags |= LCF_NVME_PRLI;
2213
2214         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2215         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2216         logio->port_id[1] = sp->fcport->d_id.b.area;
2217         logio->port_id[2] = sp->fcport->d_id.b.domain;
2218         logio->vp_index = sp->vha->vp_idx;
2219 }
2220
2221 static void
2222 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2223 {
2224         struct srb_iocb *lio = &sp->u.iocb_cmd;
2225
2226         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2227         if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
2228                 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2229         } else {
2230                 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2231                 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2232                         logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2233                 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2234                         logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2235         }
2236         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2237         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2238         logio->port_id[1] = sp->fcport->d_id.b.area;
2239         logio->port_id[2] = sp->fcport->d_id.b.domain;
2240         logio->vp_index = sp->vha->vp_idx;
2241 }
2242
2243 static void
2244 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2245 {
2246         struct qla_hw_data *ha = sp->vha->hw;
2247         struct srb_iocb *lio = &sp->u.iocb_cmd;
2248         uint16_t opts;
2249
2250         mbx->entry_type = MBX_IOCB_TYPE;
2251         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2252         mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2253         opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2254         opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
2255         if (HAS_EXTENDED_IDS(ha)) {
2256                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2257                 mbx->mb10 = cpu_to_le16(opts);
2258         } else {
2259                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2260         }
2261         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2262         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2263             sp->fcport->d_id.b.al_pa);
2264         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2265 }
2266
2267 static void
2268 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2269 {
2270         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2271         logio->control_flags =
2272             cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
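        /*
         * The handle is released to the firmware (LCF_FREE_NPORT) unless a
         * target-mode session asked to keep it -- the apparent point of
         * the keep_nport_handle test below.
         */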
2273         if (!sp->fcport->se_sess ||
2274             !sp->fcport->keep_nport_handle)
2275                 logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
2276         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2277         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2278         logio->port_id[1] = sp->fcport->d_id.b.area;
2279         logio->port_id[2] = sp->fcport->d_id.b.domain;
2280         logio->vp_index = sp->vha->vp_idx;
2281 }
2282
2283 static void
2284 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2285 {
2286         struct qla_hw_data *ha = sp->vha->hw;
2287
2288         mbx->entry_type = MBX_IOCB_TYPE;
2289         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2290         mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2291         mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2292             cpu_to_le16(sp->fcport->loop_id):
2293             cpu_to_le16(sp->fcport->loop_id << 8);
2294         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2295         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2296             sp->fcport->d_id.b.al_pa);
2297         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2298         /* Implicit: mbx->mb10 = 0. */
2299 }
2300
2301 static void
2302 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2303 {
2304         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2305         logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2306         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2307         logio->vp_index = sp->vha->vp_idx;
2308 }
2309
2310 static void
2311 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2312 {
2313         struct qla_hw_data *ha = sp->vha->hw;
2314
2315         mbx->entry_type = MBX_IOCB_TYPE;
2316         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2317         mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2318         if (HAS_EXTENDED_IDS(ha)) {
2319                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2320                 mbx->mb10 = cpu_to_le16(BIT_0);
2321         } else {
2322                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2323         }
2324         mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2325         mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2326         mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2327         mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2328         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2329 }
2330
2331 static void
2332 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2333 {
2334         uint32_t flags;
2335         uint64_t lun;
2336         struct fc_port *fcport = sp->fcport;
2337         scsi_qla_host_t *vha = fcport->vha;
2338         struct qla_hw_data *ha = vha->hw;
2339         struct srb_iocb *iocb = &sp->u.iocb_cmd;
2340         struct req_que *req = vha->req;
2341
2342         flags = iocb->u.tmf.flags;
2343         lun = iocb->u.tmf.lun;
2344
2345         tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2346         tsk->entry_count = 1;
2347         tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2348         tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2349         tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
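        /*
         * Twice R_A_TOV, matching the usual FC rule of waiting 2 * R_A_TOV
         * for outstanding exchanges to drain; the /10 presumably converts
         * the stored resource-allocation timeout into the IOCB's units.
         */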
2350         tsk->control_flags = cpu_to_le32(flags);
2351         tsk->port_id[0] = fcport->d_id.b.al_pa;
2352         tsk->port_id[1] = fcport->d_id.b.area;
2353         tsk->port_id[2] = fcport->d_id.b.domain;
2354         tsk->vp_index = fcport->vha->vp_idx;
2355
2356         if (flags == TCF_LUN_RESET) {
2357                 int_to_scsilun(lun, &tsk->lun);
2358                 host_to_fcp_swap((uint8_t *)&tsk->lun,
2359                         sizeof(tsk->lun));
2360         }
2361 }
2362
2363 static void
2364 qla2x00_els_dcmd_sp_free(void *data)
2365 {
2366         srb_t *sp = data;
2367         struct srb_iocb *elsio = &sp->u.iocb_cmd;
2368
2369         kfree(sp->fcport);
2370
2371         if (elsio->u.els_logo.els_logo_pyld)
2372                 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2373                     elsio->u.els_logo.els_logo_pyld,
2374                     elsio->u.els_logo.els_logo_pyld_dma);
2375
2376         del_timer(&elsio->timer);
2377         qla2x00_rel_sp(sp);
2378 }
2379
2380 static void
2381 qla2x00_els_dcmd_iocb_timeout(void *data)
2382 {
2383         srb_t *sp = data;
2384         fc_port_t *fcport = sp->fcport;
2385         struct scsi_qla_host *vha = sp->vha;
2386         struct srb_iocb *lio = &sp->u.iocb_cmd;
2387
2388         ql_dbg(ql_dbg_io, vha, 0x3069,
2389             "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2390             sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2391             fcport->d_id.b.al_pa);
2392
2393         complete(&lio->u.els_logo.comp);
2394 }
2395
2396 static void
2397 qla2x00_els_dcmd_sp_done(void *ptr, int res)
2398 {
2399         srb_t *sp = ptr;
2400         fc_port_t *fcport = sp->fcport;
2401         struct srb_iocb *lio = &sp->u.iocb_cmd;
2402         struct scsi_qla_host *vha = sp->vha;
2403
2404         ql_dbg(ql_dbg_io, vha, 0x3072,
2405             "%s hdl=%x, portid=%02x%02x%02x done\n",
2406             sp->name, sp->handle, fcport->d_id.b.domain,
2407             fcport->d_id.b.area, fcport->d_id.b.al_pa);
2408
2409         complete(&lio->u.els_logo.comp);
2410 }
2411
2412 int
2413 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2414     port_id_t remote_did)
2415 {
2416         srb_t *sp;
2417         fc_port_t *fcport = NULL;
2418         struct srb_iocb *elsio = NULL;
2419         struct qla_hw_data *ha = vha->hw;
2420         struct els_logo_payload logo_pyld;
2421         int rval = QLA_SUCCESS;
2422
2423         fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2424         if (!fcport) {
2425                 ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2426                 return -ENOMEM;
2427         }
2428
2429         /* Alloc SRB structure */
2430         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2431         if (!sp) {
2432                 kfree(fcport);
2433                 ql_log(ql_log_info, vha, 0x70e6,
2434                     "SRB allocation failed\n");
2435                 return -ENOMEM;
2436         }
2437
2438         elsio = &sp->u.iocb_cmd;
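        /*
         * The fcport built here is a throwaway: an unassigned loop id
         * (0xFFFF) plus the destination D_ID are all the ELS needs, and it
         * is freed again in qla2x00_els_dcmd_sp_free().
         */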
2439         fcport->loop_id = 0xFFFF;
2440         fcport->d_id.b.domain = remote_did.b.domain;
2441         fcport->d_id.b.area = remote_did.b.area;
2442         fcport->d_id.b.al_pa = remote_did.b.al_pa;
2443
2444         ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2445             fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2446
2447         sp->type = SRB_ELS_DCMD;
2448         sp->name = "ELS_DCMD";
2449         sp->fcport = fcport;
2450         elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2451         qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2452         init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
2453         sp->done = qla2x00_els_dcmd_sp_done;
2454         sp->free = qla2x00_els_dcmd_sp_free;
2455
2456         elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2457                             DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2458                             GFP_KERNEL);
2459
2460         if (!elsio->u.els_logo.els_logo_pyld) {
2461                 sp->free(sp);
2462                 return QLA_FUNCTION_FAILED;
2463         }
2464
2465         memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2466
2467         elsio->u.els_logo.els_cmd = els_opcode;
2468         logo_pyld.opcode = els_opcode;
2469         logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2470         logo_pyld.s_id[1] = vha->d_id.b.area;
2471         logo_pyld.s_id[2] = vha->d_id.b.domain;
2472         host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2473         memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2474
2475         memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2476             sizeof(struct els_logo_payload));
2477
2478         rval = qla2x00_start_sp(sp);
2479         if (rval != QLA_SUCCESS) {
2480                 sp->free(sp);
2481                 return QLA_FUNCTION_FAILED;
2482         }
2483
2484         ql_dbg(ql_dbg_io, vha, 0x3074,
2485             "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2486             sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2487             fcport->d_id.b.area, fcport->d_id.b.al_pa);
2488
2489         wait_for_completion(&elsio->u.els_logo.comp);
2490
2491         sp->free(sp);
2492         return rval;
2493 }
2494
2495 static void
2496 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2497 {
2498         scsi_qla_host_t *vha = sp->vha;
2499         struct srb_iocb *elsio = &sp->u.iocb_cmd;
2500
2501         els_iocb->entry_type = ELS_IOCB_TYPE;
2502         els_iocb->entry_count = 1;
2503         els_iocb->sys_define = 0;
2504         els_iocb->entry_status = 0;
2505         els_iocb->handle = sp->handle;
2506         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2507         els_iocb->tx_dsd_count = 1;
2508         els_iocb->vp_index = vha->vp_idx;
2509         els_iocb->sof_type = EST_SOFI3;
2510         els_iocb->rx_dsd_count = 0;
2511         els_iocb->opcode = elsio->u.els_logo.els_cmd;
2512
2513         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2514         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2515         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2516         els_iocb->s_id[0] = vha->d_id.b.al_pa;
2517         els_iocb->s_id[1] = vha->d_id.b.area;
2518         els_iocb->s_id[2] = vha->d_id.b.domain;
2519         els_iocb->control_flags = 0;
2520
2521         if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
2522                 els_iocb->tx_byte_count = els_iocb->tx_len =
2523                         sizeof(struct els_plogi_payload);
2524                 els_iocb->tx_address[0] =
2525                         cpu_to_le32(LSD(elsio->u.els_plogi.els_plogi_pyld_dma));
2526                 els_iocb->tx_address[1] =
2527                         cpu_to_le32(MSD(elsio->u.els_plogi.els_plogi_pyld_dma));
2528
2529                 els_iocb->rx_dsd_count = 1;
2530                 els_iocb->rx_byte_count = els_iocb->rx_len =
2531                         sizeof(struct els_plogi_payload);
2532                 els_iocb->rx_address[0] =
2533                         cpu_to_le32(LSD(elsio->u.els_plogi.els_resp_pyld_dma));
2534                 els_iocb->rx_address[1] =
2535                         cpu_to_le32(MSD(elsio->u.els_plogi.els_resp_pyld_dma));
2536
2537                 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2538                     "PLOGI ELS IOCB:\n");
2539                 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x0109,
2540                     (uint8_t *)els_iocb,
2541                     sizeof(*els_iocb));
2542         } else {
2543                 els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
2544                 els_iocb->tx_address[0] =
2545                     cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
2546                 els_iocb->tx_address[1] =
2547                     cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
2548                 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2549
2550                 els_iocb->rx_byte_count = 0;
2551                 els_iocb->rx_address[0] = 0;
2552                 els_iocb->rx_address[1] = 0;
2553                 els_iocb->rx_len = 0;
2554         }
2555
2556         sp->vha->qla_stats.control_requests++;
2557 }
2558
2559 static void
2560 qla2x00_els_dcmd2_iocb_timeout(void *data)
2561 {
2562         srb_t *sp = data;
2563         fc_port_t *fcport = sp->fcport;
2564         struct scsi_qla_host *vha = sp->vha;
2565         struct qla_hw_data *ha = vha->hw;
2566         unsigned long flags = 0;
2567         int res;
2568
2569         ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2570             "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2571             sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2572
2573         /* Abort the exchange */
2574         spin_lock_irqsave(&ha->hardware_lock, flags);
2575         res = ha->isp_ops->abort_command(sp);
2576         ql_dbg(ql_dbg_io, vha, 0x3070,
2577             "mbx abort_command %s\n",
2578             (res == QLA_SUCCESS) ? "successful" : "failed");
2579         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2580
2581         sp->done(sp, QLA_FUNCTION_TIMEOUT);
2582 }
2583
2584 static void
2585 qla2x00_els_dcmd2_sp_done(void *ptr, int res)
2586 {
2587         srb_t *sp = ptr;
2588         fc_port_t *fcport = sp->fcport;
2589         struct srb_iocb *lio = &sp->u.iocb_cmd;
2590         struct scsi_qla_host *vha = sp->vha;
2591         struct event_arg ea;
2592         struct qla_work_evt *e;
2593
2594         ql_dbg(ql_dbg_disc, vha, 0x3072,
2595             "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
2596             sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
2597
2598         fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
2599         del_timer(&sp->u.iocb_cmd.timer);
2600
2601         if (sp->flags & SRB_WAKEUP_ON_COMP)
2602                 complete(&lio->u.els_plogi.comp);
2603         else {
2604                 if (res) {
2605                         set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2606                 } else {
2607                         memset(&ea, 0, sizeof(ea));
2608                         ea.fcport = fcport;
2609                         ea.rc = res;
2610                         ea.event = FCME_ELS_PLOGI_DONE;
2611                         qla2x00_fcport_event_handler(vha, &ea);
2612                 }
2613
2614                 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
2615                 if (!e) {
2616                         struct srb_iocb *elsio = &sp->u.iocb_cmd;
2617
2618                         if (elsio->u.els_plogi.els_plogi_pyld)
2619                                 dma_free_coherent(&sp->vha->hw->pdev->dev,
2620                                     elsio->u.els_plogi.tx_size,
2621                                     elsio->u.els_plogi.els_plogi_pyld,
2622                                     elsio->u.els_plogi.els_plogi_pyld_dma);
2623
2624                         if (elsio->u.els_plogi.els_resp_pyld)
2625                                 dma_free_coherent(&sp->vha->hw->pdev->dev,
2626                                     elsio->u.els_plogi.rx_size,
2627                                     elsio->u.els_plogi.els_resp_pyld,
2628                                     elsio->u.els_plogi.els_resp_pyld_dma);
2629                         sp->free(sp);
2630                         return;
2631                 }
2632                 e->u.iosb.sp = sp;
2633                 qla2x00_post_work(vha, e);
2634         }
2635 }
2636
2637 int
2638 qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
2639     fc_port_t *fcport, bool wait)
2640 {
2641         srb_t *sp;
2642         struct srb_iocb *elsio = NULL;
2643         struct qla_hw_data *ha = vha->hw;
2644         int rval = QLA_SUCCESS;
2645         void    *ptr, *resp_ptr;
2646         dma_addr_t ptr_dma;
2647
2648         /* Alloc SRB structure */
2649         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2650         if (!sp) {
2651                 ql_log(ql_log_info, vha, 0x70e6,
2652                     "SRB allocation failed\n");
2653                 return -ENOMEM;
2654         }
2655
2656         elsio = &sp->u.iocb_cmd;
2657         ql_dbg(ql_dbg_io, vha, 0x3073,
2658             "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
2659
2660         fcport->flags |= FCF_ASYNC_SENT;
2661         sp->type = SRB_ELS_DCMD;
2662         sp->name = "ELS_DCMD";
2663         sp->fcport = fcport;
2664
2665         elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
2666         init_completion(&elsio->u.els_plogi.comp);
2667         if (wait)
2668                 sp->flags = SRB_WAKEUP_ON_COMP;
2669
2670         qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
2671
2672         sp->done = qla2x00_els_dcmd2_sp_done;
2673         elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
2674
2675         ptr = elsio->u.els_plogi.els_plogi_pyld =
2676             dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2677                 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
2678         ptr_dma = elsio->u.els_plogi.els_plogi_pyld_dma;
2679
2680         if (!elsio->u.els_plogi.els_plogi_pyld) {
2681                 rval = QLA_FUNCTION_FAILED;
2682                 goto out;
2683         }
2684
2685         resp_ptr = elsio->u.els_plogi.els_resp_pyld =
2686             dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2687                 &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
2688
2689         if (!elsio->u.els_plogi.els_resp_pyld) {
2690                 rval = QLA_FUNCTION_FAILED;
2691                 goto out;
2692         }
2693
2694         ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
2695
2696         memset(ptr, 0, sizeof(struct els_plogi_payload));
2697         memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
2698         memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
2699             &ha->plogi_els_payld.data,
2700             sizeof(elsio->u.els_plogi.els_plogi_pyld->data));
2701
2702         elsio->u.els_plogi.els_cmd = els_opcode;
2703         elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
2704
2705         ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
2706         ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
2707             (uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
2708             sizeof(*elsio->u.els_plogi.els_plogi_pyld));
2709
2710         rval = qla2x00_start_sp(sp);
2711         if (rval != QLA_SUCCESS) {
2712                 rval = QLA_FUNCTION_FAILED;
2713         } else {
2714                 ql_dbg(ql_dbg_disc, vha, 0x3074,
2715                     "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
2716                     sp->name, sp->handle, fcport->loop_id,
2717                     fcport->d_id.b24, vha->d_id.b24);
2718         }
2719
2720         if (wait) {
2721                 wait_for_completion(&elsio->u.els_plogi.comp);
2722
2723                 if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
2724                         rval = QLA_FUNCTION_FAILED;
2725         } else {
2726                 goto done;
2727         }
2728
2729 out:
2730         fcport->flags &= ~(FCF_ASYNC_SENT);
2731         if (elsio->u.els_plogi.els_plogi_pyld)
2732                 dma_free_coherent(&sp->vha->hw->pdev->dev,
2733                     elsio->u.els_plogi.tx_size,
2734                     elsio->u.els_plogi.els_plogi_pyld,
2735                     elsio->u.els_plogi.els_plogi_pyld_dma);
2736
2737         if (elsio->u.els_plogi.els_resp_pyld)
2738                 dma_free_coherent(&sp->vha->hw->pdev->dev,
2739                     elsio->u.els_plogi.rx_size,
2740                     elsio->u.els_plogi.els_resp_pyld,
2741                     elsio->u.els_plogi.els_resp_pyld_dma);
2742
2743         sp->free(sp);
2744 done:
2745         return rval;
2746 }
2747
2748 static void
2749 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2750 {
2751         struct bsg_job *bsg_job = sp->u.bsg_job;
2752         struct fc_bsg_request *bsg_request = bsg_job->request;
2753
2754         els_iocb->entry_type = ELS_IOCB_TYPE;
2755         els_iocb->entry_count = 1;
2756         els_iocb->sys_define = 0;
2757         els_iocb->entry_status = 0;
2758         els_iocb->handle = sp->handle;
2759         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2760         els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2761         els_iocb->vp_index = sp->vha->vp_idx;
2762         els_iocb->sof_type = EST_SOFI3;
2763         els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2764
2765         els_iocb->opcode =
2766             sp->type == SRB_ELS_CMD_RPT ?
2767             bsg_request->rqst_data.r_els.els_code :
2768             bsg_request->rqst_data.h_els.command_code;
2769         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2770         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2771         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2772         els_iocb->control_flags = 0;
2773         els_iocb->rx_byte_count =
2774             cpu_to_le32(bsg_job->reply_payload.payload_len);
2775         els_iocb->tx_byte_count =
2776             cpu_to_le32(bsg_job->request_payload.payload_len);
2777
2778         els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2779             (bsg_job->request_payload.sg_list)));
2780         els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2781             (bsg_job->request_payload.sg_list)));
2782         els_iocb->tx_len = cpu_to_le32(sg_dma_len
2783             (bsg_job->request_payload.sg_list));
2784
2785         els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2786             (bsg_job->reply_payload.sg_list)));
2787         els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2788             (bsg_job->reply_payload.sg_list)));
2789         els_iocb->rx_len = cpu_to_le32(sg_dma_len
2790             (bsg_job->reply_payload.sg_list));
2791
2792         sp->vha->qla_stats.control_requests++;
2793 }
2794
2795 static void
2796 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2797 {
2798         uint16_t        avail_dsds;
2799         uint32_t        *cur_dsd;
2800         struct scatterlist *sg;
2801         int index;
2802         uint16_t tot_dsds;
2803         scsi_qla_host_t *vha = sp->vha;
2804         struct qla_hw_data *ha = vha->hw;
2805         struct bsg_job *bsg_job = sp->u.bsg_job;
2806         int loop_iteration = 0;
2807         int entry_count = 1;
2808
2809         memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2810         ct_iocb->entry_type = CT_IOCB_TYPE;
2811         ct_iocb->entry_status = 0;
2812         ct_iocb->handle1 = sp->handle;
2813         SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2814         ct_iocb->status = cpu_to_le16(0);
2815         ct_iocb->control_flags = cpu_to_le16(0);
2816         ct_iocb->timeout = 0;
2817         ct_iocb->cmd_dsd_count =
2818             cpu_to_le16(bsg_job->request_payload.sg_cnt);
2819         ct_iocb->total_dsd_count =
2820             cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2821         ct_iocb->req_bytecount =
2822             cpu_to_le32(bsg_job->request_payload.payload_len);
2823         ct_iocb->rsp_bytecount =
2824             cpu_to_le32(bsg_job->reply_payload.payload_len);
2825
2826         ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2827             (bsg_job->request_payload.sg_list)));
2828         ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2829             (bsg_job->request_payload.sg_list)));
2830         ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2831
2832         ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2833             (bsg_job->reply_payload.sg_list)));
2834         ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2835             (bsg_job->reply_payload.sg_list)));
2836         ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2837
2838         avail_dsds = 1;
2839         cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2840         index = 0;
2841         tot_dsds = bsg_job->reply_payload.sg_cnt;
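        /*
         * The MS IOCB itself carries the one request DSD filled in above
         * plus a single inline response DSD slot; any further response
         * segments spill into Continuation Type 1 entries, five apiece.
         */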
2842
2843         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2844                 dma_addr_t       sle_dma;
2845                 cont_a64_entry_t *cont_pkt;
2846
2847                 /* Allocate additional continuation packets? */
2848                 if (avail_dsds == 0) {
2849                         /*
2850                          * Five DSDs are available in the Cont.
2851                          * Type 1 IOCB.
2852                          */
2853                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2854                             vha->hw->req_q_map[0]);
2855                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2856                         avail_dsds = 5;
2857                         entry_count++;
2858                 }
2859
2860                 sle_dma = sg_dma_address(sg);
2861                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2862                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2863                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2864                 loop_iteration++;
2865                 avail_dsds--;
2866         }
2867         ct_iocb->entry_count = entry_count;
2868
2869         sp->vha->qla_stats.control_requests++;
2870 }
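
/*
 * For illustration: the reply-payload loop above starts with one
 * embedded DSD (dseg_rsp_address) and packs five DSDs into each
 * Continuation Type 1 IOCB, so the entry count it accumulates is
 * equivalent to the sketch below (helper name is hypothetical).
 */
#if 0   /* illustrative only, not compiled */
static uint16_t sketch_ct_entry_count(uint16_t rsp_dsds)
{
        if (rsp_dsds <= 1)
                return 1;
        /* one base IOCB plus one continuation per five further DSDs */
        return 1 + DIV_ROUND_UP(rsp_dsds - 1, 5);
}
#endif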
2871
2872 static void
2873 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2874 {
2875         uint16_t        avail_dsds;
2876         uint32_t        *cur_dsd;
2877         struct scatterlist *sg;
2878         int index;
2879         uint16_t cmd_dsds, rsp_dsds;
2880         scsi_qla_host_t *vha = sp->vha;
2881         struct qla_hw_data *ha = vha->hw;
2882         struct bsg_job *bsg_job = sp->u.bsg_job;
2883         int entry_count = 1;
2884         cont_a64_entry_t *cont_pkt = NULL;
2885
2886         ct_iocb->entry_type = CT_IOCB_TYPE;
2887         ct_iocb->entry_status = 0;
2888         ct_iocb->sys_define = 0;
2889         ct_iocb->handle = sp->handle;
2890
2891         ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2892         ct_iocb->vp_index = sp->vha->vp_idx;
2893         ct_iocb->comp_status = cpu_to_le16(0);
2894
2895         cmd_dsds = bsg_job->request_payload.sg_cnt;
2896         rsp_dsds = bsg_job->reply_payload.sg_cnt;
2897
2898         ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
2899         ct_iocb->timeout = 0;
2900         ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
2901         ct_iocb->cmd_byte_count =
2902             cpu_to_le32(bsg_job->request_payload.payload_len);
2903
2904         avail_dsds = 2;
2905         cur_dsd = (uint32_t *)ct_iocb->dseg_0_address;
2906         index = 0;
2907
2908         for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
2909                 dma_addr_t       sle_dma;
2910
2911                 /* Allocate additional continuation packets? */
2912                 if (avail_dsds == 0) {
2913                         /*
2914                          * Five DSDs are available in the Cont.
2915                          * Type 1 IOCB.
2916                          */
2917                         cont_pkt = qla2x00_prep_cont_type1_iocb(
2918                             vha, ha->req_q_map[0]);
2919                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2920                         avail_dsds = 5;
2921                         entry_count++;
2922                 }
2923
2924                 sle_dma = sg_dma_address(sg);
2925                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2926                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2927                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2928                 avail_dsds--;
2929         }
2930
2931         index = 0;
2932
2933         for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
2934                 dma_addr_t       sle_dma;
2935
2936                 /* Allocate additional continuation packets? */
2937                 if (avail_dsds == 0) {
2938                         /*
2939                          * Five DSDs are available in the Cont.
2940                          * Type 1 IOCB.
2941                          */
2942                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2943                             ha->req_q_map[0]);
2944                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2945                         avail_dsds = 5;
2946                         entry_count++;
2947                 }
2948
2949                 sle_dma = sg_dma_address(sg);
2950                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2951                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2952                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2953                 avail_dsds--;
2954         }
2955         ct_iocb->entry_count = entry_count;
2956 }
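
/*
 * For illustration: unlike the 2x00 version above, this IOCB embeds two
 * DSDs (dseg_0_address) that are shared by the command and response
 * scatter/gather walks; continuations again carry five DSDs each.  The
 * accumulated entry count is equivalent to this hypothetical sketch:
 */
#if 0   /* illustrative only, not compiled */
static uint16_t sketch_ct24_entry_count(uint16_t cmd_dsds, uint16_t rsp_dsds)
{
        uint16_t total = cmd_dsds + rsp_dsds;

        if (total <= 2)
                return 1;
        return 1 + DIV_ROUND_UP(total - 2, 5);
}
#endif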
2957
2958 /**
2959  * qla82xx_start_scsi() - Send a SCSI command to the ISP
2960  * @sp: command to send to the ISP
2961  *
2962  * Returns non-zero if a failure occurred, else zero.
2963  */
2964 int
2965 qla82xx_start_scsi(srb_t *sp)
2966 {
2967         int             nseg;
2968         unsigned long   flags;
2969         struct scsi_cmnd *cmd;
2970         uint32_t        *clr_ptr;
2971         uint32_t        index;
2972         uint32_t        handle;
2973         uint16_t        cnt;
2974         uint16_t        req_cnt;
2975         uint16_t        tot_dsds;
2976         struct device_reg_82xx __iomem *reg;
2977         uint32_t dbval;
2978         uint32_t *fcp_dl;
2979         uint8_t additional_cdb_len;
2980         struct ct6_dsd *ctx;
2981         struct scsi_qla_host *vha = sp->vha;
2982         struct qla_hw_data *ha = vha->hw;
2983         struct req_que *req = NULL;
2984         struct rsp_que *rsp = NULL;
2985
2986         /* Setup device pointers. */
2987         reg = &ha->iobase->isp82;
2988         cmd = GET_CMD_SP(sp);
2989         req = vha->req;
2990         rsp = ha->rsp_q_map[0];
2991
2992         /* So we know we haven't pci_map'ed anything yet */
2993         tot_dsds = 0;
2994
2995         dbval = 0x04 | (ha->portnum << 5);
2996
2997         /* Send marker if required */
2998         if (vha->marker_needed != 0) {
2999                 if (qla2x00_marker(vha, req,
3000                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
3001                         ql_log(ql_log_warn, vha, 0x300c,
3002                             "qla2x00_marker failed for cmd=%p.\n", cmd);
3003                         return QLA_FUNCTION_FAILED;
3004                 }
3005                 vha->marker_needed = 0;
3006         }
3007
3008         /* Acquire ring specific lock */
3009         spin_lock_irqsave(&ha->hardware_lock, flags);
3010
3011         /* Check for room in outstanding command list. */
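        /*
         * The scan starts just past the most recently used handle and
         * wraps from the top of the table back to 1, so slot 0 is never
         * handed out.
         */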
3012         handle = req->current_outstanding_cmd;
3013         for (index = 1; index < req->num_outstanding_cmds; index++) {
3014                 handle++;
3015                 if (handle == req->num_outstanding_cmds)
3016                         handle = 1;
3017                 if (!req->outstanding_cmds[handle])
3018                         break;
3019         }
3020         if (index == req->num_outstanding_cmds)
3021                 goto queuing_error;
3022
3023         /* Map the sg table so we have an accurate count of sg entries needed */
3024         if (scsi_sg_count(cmd)) {
3025                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3026                     scsi_sg_count(cmd), cmd->sc_data_direction);
3027                 if (unlikely(!nseg))
3028                         goto queuing_error;
3029         } else
3030                 nseg = 0;
3031
3032         tot_dsds = nseg;
3033
3034         if (tot_dsds > ql2xshiftctondsd) {
3035                 struct cmd_type_6 *cmd_pkt;
3036                 uint16_t more_dsd_lists = 0;
3037                 struct dsd_dma *dsd_ptr;
3038                 uint16_t i;
3039
3040                 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3041                 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
3042                         ql_dbg(ql_dbg_io, vha, 0x300d,
3043                             "Num of DSD list %d is more than %d for cmd=%p.\n",
3044                             more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
3045                             cmd);
3046                         goto queuing_error;
3047                 }
3048
3049                 if (more_dsd_lists <= ha->gbl_dsd_avail)
3050                         goto sufficient_dsds;
3051                 else
3052                         more_dsd_lists -= ha->gbl_dsd_avail;
3053
3054                 for (i = 0; i < more_dsd_lists; i++) {
3055                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3056                         if (!dsd_ptr) {
3057                                 ql_log(ql_log_fatal, vha, 0x300e,
3058                                     "Failed to allocate memory for dsd_dma "
3059                                     "for cmd=%p.\n", cmd);
3060                                 goto queuing_error;
3061                         }
3062
3063                         dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3064                                 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3065                         if (!dsd_ptr->dsd_addr) {
3066                                 kfree(dsd_ptr);
3067                                 ql_log(ql_log_fatal, vha, 0x300f,
3068                                     "Failed to allocate memory for dsd_addr "
3069                                     "for cmd=%p.\n", cmd);
3070                                 goto queuing_error;
3071                         }
3072                         list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3073                         ha->gbl_dsd_avail++;
3074                 }
3075
3076 sufficient_dsds:
3077                 req_cnt = 1;
3078
3079                 if (req->cnt < (req_cnt + 2)) {
3080                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3081                                 &reg->req_q_out[0]);
3082                         if (req->ring_index < cnt)
3083                                 req->cnt = cnt - req->ring_index;
3084                         else
3085                                 req->cnt = req->length -
3086                                         (req->ring_index - cnt);
3087                         if (req->cnt < (req_cnt + 2))
3088                                 goto queuing_error;
3089                 }
3090
3091                 ctx = sp->u.scmd.ctx =
3092                     mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3093                 if (!ctx) {
3094                         ql_log(ql_log_fatal, vha, 0x3010,
3095                             "Failed to allocate ctx for cmd=%p.\n", cmd);
3096                         goto queuing_error;
3097                 }
3098
3099                 memset(ctx, 0, sizeof(struct ct6_dsd));
3100                 ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
3101                         GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3102                 if (!ctx->fcp_cmnd) {
3103                         ql_log(ql_log_fatal, vha, 0x3011,
3104                             "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
3105                         goto queuing_error;
3106                 }
3107
3108                 /* Initialize the DSD list and dma handle */
3109                 INIT_LIST_HEAD(&ctx->dsd_list);
3110                 ctx->dsd_use_cnt = 0;
3111
3112                 if (cmd->cmd_len > 16) {
3113                         additional_cdb_len = cmd->cmd_len - 16;
3114                         if ((cmd->cmd_len % 4) != 0) {
3115                         /* SCSI commands bigger than 16 bytes must be a
3116                          * multiple of 4.
3117                          */
3118                                 ql_log(ql_log_warn, vha, 0x3012,
3119                                     "scsi cmd len %d not multiple of 4 "
3120                                     "for cmd=%p.\n", cmd->cmd_len, cmd);
3121                                 goto queuing_error_fcp_cmnd;
3122                         }
3123                         ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3124                 } else {
3125                         additional_cdb_len = 0;
3126                         ctx->fcp_cmnd_len = 12 + 16 + 4;
3127                 }
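                /*
                 * FCP_CMND IU layout: an 8-byte LUN plus 4 bytes of
                 * task-attribute/control fields precede the CDB, and a
                 * 4-byte FCP_DL (total data length) follows it -- hence
                 * 12 + cdb_len + 4.
                 */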
3128
3129                 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3130                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3131
3132                 /* Zero out remaining portion of packet. */
3133                 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
3134                 clr_ptr = (uint32_t *)cmd_pkt + 2;
3135                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3136                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3137
3138                 /* Set NPORT-ID and LUN number*/
3139                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3140                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3141                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3142                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3143                 cmd_pkt->vp_index = sp->vha->vp_idx;
3144
3145                 /* Build IOCB segments */
3146                 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3147                         goto queuing_error_fcp_cmnd;
3148
3149                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3150                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3151
3152                 /* build FCP_CMND IU */
3153                 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3154                 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3155
3156                 if (cmd->sc_data_direction == DMA_TO_DEVICE)
3157                         ctx->fcp_cmnd->additional_cdb_len |= 1;
3158                 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3159                         ctx->fcp_cmnd->additional_cdb_len |= 2;
3160
3161                 /* Populate the FCP_PRIO. */
3162                 if (ha->flags.fcp_prio_enabled)
3163                         ctx->fcp_cmnd->task_attribute |=
3164                             sp->fcport->fcp_prio << 3;
3165
3166                 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3167
3168                 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
3169                     additional_cdb_len);
3170                 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
3171
3172                 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3173                 cmd_pkt->fcp_cmnd_dseg_address[0] =
3174                     cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
3175                 cmd_pkt->fcp_cmnd_dseg_address[1] =
3176                     cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
3177
3178                 sp->flags |= SRB_FCP_CMND_DMA_VALID;
3179                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3180                 /* Set total data segment count. */
3181                 cmd_pkt->entry_count = (uint8_t)req_cnt;
3182                 /* Specify response queue number where
3183                  * completion should happen
3184                  */
3185                 cmd_pkt->entry_status = (uint8_t) rsp->id;
3186         } else {
3187                 struct cmd_type_7 *cmd_pkt;
3188                 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3189                 if (req->cnt < (req_cnt + 2)) {
3190                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3191                             &reg->req_q_out[0]);
3192                         if (req->ring_index < cnt)
3193                                 req->cnt = cnt - req->ring_index;
3194                         else
3195                                 req->cnt = req->length -
3196                                         (req->ring_index - cnt);
3197                 }
3198                 if (req->cnt < (req_cnt + 2))
3199                         goto queuing_error;
3200
3201                 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3202                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3203
3204                 /* Zero out remaining portion of packet. */
3205                 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3206                 clr_ptr = (uint32_t *)cmd_pkt + 2;
3207                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3208                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3209
3210                 /* Set NPORT-ID and LUN number*/
3211                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3212                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3213                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3214                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3215                 cmd_pkt->vp_index = sp->vha->vp_idx;
3216
3217                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3218                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
3219                     sizeof(cmd_pkt->lun));
3220
3221                 /* Populate the FCP_PRIO. */
3222                 if (ha->flags.fcp_prio_enabled)
3223                         cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3224
3225                 /* Load SCSI command packet. */
3226                 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3227                 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3228
3229                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3230
3231                 /* Build IOCB segments */
3232                 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3233
3234                 /* Set total data segment count. */
3235                 cmd_pkt->entry_count = (uint8_t)req_cnt;
3236                 /* Specify response queue number where
3237                  * completion should happen.
3238                  */
3239                 cmd_pkt->entry_status = (uint8_t) rsp->id;
3240
3241         }
3242         /* Build command packet. */
3243         req->current_outstanding_cmd = handle;
3244         req->outstanding_cmds[handle] = sp;
3245         sp->handle = handle;
3246         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3247         req->cnt -= req_cnt;
3248         wmb();
3249
3250         /* Adjust ring index. */
3251         req->ring_index++;
3252         if (req->ring_index == req->length) {
3253                 req->ring_index = 0;
3254                 req->ring_ptr = req->ring;
3255         } else
3256                 req->ring_ptr++;
3257
3258         sp->flags |= SRB_DMA_VALID;
3259
3260         /* Set chip new ring index. */
3261         /* write, read and verify logic */
3262         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3263         if (ql2xdbwr)
3264                 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
3265         else {
3266                 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3267                 wmb();
3268                 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3269                         WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3270                         wmb();
3271                 }
3272         }
3273
3274         /* Manage unprocessed RIO/ZIO commands in response queue. */
3275         if (vha->flags.process_response_queue &&
3276             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3277                 qla24xx_process_response_queue(vha, rsp);
3278
3279         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3280         return QLA_SUCCESS;
3281
3282 queuing_error_fcp_cmnd:
3283         dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3284 queuing_error:
3285         if (tot_dsds)
3286                 scsi_dma_unmap(cmd);
3287
3288         if (sp->u.scmd.ctx) {
3289                 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
3290                 sp->u.scmd.ctx = NULL;
3291         }
3292         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3293
3294         return QLA_FUNCTION_FAILED;
3295 }
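
/*
 * For illustration: two recurring idioms from qla82xx_start_scsi(),
 * restated as minimal sketches.  Helper names are hypothetical and not
 * part of the driver.
 */
#if 0   /* illustrative only, not compiled */
/* Free entries in the circular request ring, given the producer index
 * (ring_index) and the firmware's consumer index (out). */
static uint16_t sketch_ring_space(uint16_t length, uint16_t ring_index,
    uint16_t out)
{
        if (ring_index < out)
                return out - ring_index;
        return length - (ring_index - out);
}

/* The 82xx doorbell packs command type, PCI function number, request
 * queue id and the new producer index into a single 32-bit write. */
static uint32_t sketch_82xx_dbval(uint8_t portnum, uint16_t req_id,
    uint16_t ring_index)
{
        return 0x04 | (portnum << 5) | (req_id << 8) |
            ((uint32_t)ring_index << 16);
}
#endif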
3296
3297 static void
3298 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3299 {
3300         struct srb_iocb *aio = &sp->u.iocb_cmd;
3301         scsi_qla_host_t *vha = sp->vha;
3302         struct req_que *req = sp->qpair->req;
3303
3304         memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3305         abt_iocb->entry_type = ABORT_IOCB_TYPE;
3306         abt_iocb->entry_count = 1;
3307         abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3308         if (sp->fcport) {
3309                 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3310                 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3311                 abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3312                 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3313         }
3314         abt_iocb->handle_to_abort =
3315             cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
3316                                     aio->u.abt.cmd_hndl));
3317         abt_iocb->vp_index = vha->vp_idx;
3318         abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
3319         /* Send the command to the firmware */
3320         wmb();
3321 }
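
/*
 * For illustration: the abort IOCB names its victim with the same packed
 * handle used at submission time.  A sketch of the packing, assuming
 * MAKE_HANDLE() keeps its usual qla2xxx meaning of queue number in the
 * high 16 bits (helper names are hypothetical):
 */
#if 0   /* illustrative only, not compiled */
static uint32_t sketch_make_handle(uint16_t que_no, uint16_t handle)
{
        return ((uint32_t)que_no << 16) | handle;
}

static uint16_t sketch_handle_que(uint32_t packed) { return packed >> 16; }
static uint16_t sketch_handle_idx(uint32_t packed) { return packed & 0xffff; }
#endif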
3322
3323 static void
3324 qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3325 {
3326         int i, sz;
3327
3328         mbx->entry_type = MBX_IOCB_TYPE;
3329         mbx->handle = sp->handle;
3330         sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3331
3332         for (i = 0; i < sz; i++)
3333                 mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
3334 }
3335
3336 static void
3337 qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3338 {
3339         sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3340         qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3341         ct_pkt->handle = sp->handle;
3342 }
3343
3344 static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3345         struct nack_to_isp *nack)
3346 {
3347         struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3348
3349         nack->entry_type = NOTIFY_ACK_TYPE;
3350         nack->entry_count = 1;
3351         nack->ox_id = ntfy->ox_id;
3352
3353         nack->u.isp24.handle = sp->handle;
3354         nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3355         if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3356                 nack->u.isp24.flags = ntfy->u.isp24.flags &
3357                         cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3358         }
3359         nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3360         nack->u.isp24.status = ntfy->u.isp24.status;
3361         nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3362         nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3363         nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3364         nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3365         nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3366         nack->u.isp24.srr_flags = 0;
3367         nack->u.isp24.srr_reject_code = 0;
3368         nack->u.isp24.srr_reject_code_expl = 0;
3369         nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3370 }
3371
3372 /*
3373  * Build NVME LS request
3374  */
3375 static int
3376 qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3377 {
3378         struct srb_iocb *nvme;
3379         int     rval = QLA_SUCCESS;
3380
3381         nvme = &sp->u.iocb_cmd;
3382         cmd_pkt->entry_type = PT_LS4_REQUEST;
3383         cmd_pkt->entry_count = 1;
3384         cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
3385
3386         cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3387         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3388         cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3389
3390         cmd_pkt->tx_dseg_count = 1;
3391         cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
3392         cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
3393         cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
3394         cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));
3395
3396         cmd_pkt->rx_dseg_count = 1;
3397         cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
3398         cmd_pkt->dseg1_len  = nvme->u.nvme.rsp_len;
3399         cmd_pkt->dseg1_address[0] =  cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
3400         cmd_pkt->dseg1_address[1] =  cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));
3401
3402         return rval;
3403 }
3404
3405 static void
3406 qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
3407 {
3408         int map, pos;
3409
3410         vce->entry_type = VP_CTRL_IOCB_TYPE;
3411         vce->handle = sp->handle;
3412         vce->entry_count = 1;
3413         vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
3414         vce->vp_count = cpu_to_le16(1);
3415
3416         /*
3417          * The index map in firmware starts at 1, so decrement the index;
3418          * this is OK as index 0 is never used.
3419          */
3420         map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
3421         pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
3422         vce->vp_idx_map[map] |= 1 << pos;
3423 }
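
/*
 * For illustration: the map/pos arithmetic above selects one bit per
 * vp_index in the vp_idx_map byte array, e.g. vp_index 10 yields
 * map = 9 / 8 = 1 and pos = 9 & 7 = 1, i.e. bit 1 of byte 1.  As a
 * hypothetical sketch:
 */
#if 0   /* illustrative only, not compiled */
static void sketch_set_vp_bit(uint8_t *vp_idx_map, uint8_t vp_index)
{
        vp_idx_map[(vp_index - 1) / 8] |= 1 << ((vp_index - 1) & 7);
}
#endif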
3424
3425 static void
3426 qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
3427 {
3428         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
3429         logio->control_flags =
3430             cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
3431
3432         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3433         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
3434         logio->port_id[1] = sp->fcport->d_id.b.area;
3435         logio->port_id[2] = sp->fcport->d_id.b.domain;
3436         logio->vp_index = sp->fcport->vha->vp_idx;
3437 }
3438
3439 int
3440 qla2x00_start_sp(srb_t *sp)
3441 {
3442         int rval;
3443         scsi_qla_host_t *vha = sp->vha;
3444         struct qla_hw_data *ha = vha->hw;
3445         void *pkt;
3446         unsigned long flags;
3447
3448         rval = QLA_FUNCTION_FAILED;
3449         spin_lock_irqsave(&ha->hardware_lock, flags);
3450         pkt = qla2x00_alloc_iocbs(vha, sp);
3451         if (!pkt) {
3452                 ql_log(ql_log_warn, vha, 0x700c,
3453                     "qla2x00_alloc_iocbs failed.\n");
3454                 goto done;
3455         }
3456
3457         rval = QLA_SUCCESS;
3458         switch (sp->type) {
3459         case SRB_LOGIN_CMD:
3460                 IS_FWI2_CAPABLE(ha) ?
3461                     qla24xx_login_iocb(sp, pkt) :
3462                     qla2x00_login_iocb(sp, pkt);
3463                 break;
3464         case SRB_PRLI_CMD:
3465                 qla24xx_prli_iocb(sp, pkt);
3466                 break;
3467         case SRB_LOGOUT_CMD:
3468                 IS_FWI2_CAPABLE(ha) ?
3469                     qla24xx_logout_iocb(sp, pkt) :
3470                     qla2x00_logout_iocb(sp, pkt);
3471                 break;
3472         case SRB_ELS_CMD_RPT:
3473         case SRB_ELS_CMD_HST:
3474                 qla24xx_els_iocb(sp, pkt);
3475                 break;
3476         case SRB_CT_CMD:
3477                 IS_FWI2_CAPABLE(ha) ?
3478                     qla24xx_ct_iocb(sp, pkt) :
3479                     qla2x00_ct_iocb(sp, pkt);
3480                 break;
3481         case SRB_ADISC_CMD:
3482                 IS_FWI2_CAPABLE(ha) ?
3483                     qla24xx_adisc_iocb(sp, pkt) :
3484                     qla2x00_adisc_iocb(sp, pkt);
3485                 break;
3486         case SRB_TM_CMD:
3487                 IS_QLAFX00(ha) ?
3488                     qlafx00_tm_iocb(sp, pkt) :
3489                     qla24xx_tm_iocb(sp, pkt);
3490                 break;
3491         case SRB_FXIOCB_DCMD:
3492         case SRB_FXIOCB_BCMD:
3493                 qlafx00_fxdisc_iocb(sp, pkt);
3494                 break;
3495         case SRB_NVME_LS:
3496                 qla_nvme_ls(sp, pkt);
3497                 break;
3498         case SRB_ABT_CMD:
3499                 IS_QLAFX00(ha) ?
3500                         qlafx00_abort_iocb(sp, pkt) :
3501                         qla24xx_abort_iocb(sp, pkt);
3502                 break;
3503         case SRB_ELS_DCMD:
3504                 qla24xx_els_logo_iocb(sp, pkt);
3505                 break;
3506         case SRB_CT_PTHRU_CMD:
3507                 qla2x00_ctpthru_cmd_iocb(sp, pkt);
3508                 break;
3509         case SRB_MB_IOCB:
3510                 qla2x00_mb_iocb(sp, pkt);
3511                 break;
3512         case SRB_NACK_PLOGI:
3513         case SRB_NACK_PRLI:
3514         case SRB_NACK_LOGO:
3515                 qla2x00_send_notify_ack_iocb(sp, pkt);
3516                 break;
3517         case SRB_CTRL_VP:
3518                 qla25xx_ctrlvp_iocb(sp, pkt);
3519                 break;
3520         case SRB_PRLO_CMD:
3521                 qla24xx_prlo_iocb(sp, pkt);
3522                 break;
3523         default:
3524                 break;
3525         }
3526
3527         wmb();
3528         qla2x00_start_iocbs(vha, ha->req_q_map[0]);
3529 done:
3530         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3531         return rval;
3532 }
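
/*
 * For illustration: every submission through qla2x00_start_sp() follows
 * the same shape -- take the hardware lock, reserve an IOCB slot, fill
 * it according to sp->type, then a write barrier before ringing the
 * request-queue doorbell.  Condensed sketch, not a usable implementation:
 */
#if 0   /* illustrative only, not compiled */
static int sketch_start_sp(scsi_qla_host_t *vha, srb_t *sp)
{
        unsigned long flags;
        void *pkt;

        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        pkt = qla2x00_alloc_iocbs(vha, sp);     /* reserve a ring entry */
        if (pkt) {
                /* ... fill *pkt based on sp->type ... */
                wmb();                  /* publish IOCB before doorbell */
                qla2x00_start_iocbs(vha, vha->hw->req_q_map[0]);
        }
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
        return pkt ? QLA_SUCCESS : QLA_FUNCTION_FAILED;
}
#endif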
3533
3534 static void
3535 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3536                                 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3537 {
3538         uint16_t avail_dsds;
3539         uint32_t *cur_dsd;
3540         uint32_t req_data_len = 0;
3541         uint32_t rsp_data_len = 0;
3542         struct scatterlist *sg;
3543         int index;
3544         int entry_count = 1;
3545         struct bsg_job *bsg_job = sp->u.bsg_job;
3546
3547         /* Update entry type to indicate bidir command */
3548         *((uint32_t *)(&cmd_pkt->entry_type)) =
3549                 cpu_to_le32(COMMAND_BIDIRECTIONAL);
3550
3551         /* Set the transfer direction; in this case set both flags.
3552          * Also set the BD_WRAP_BACK flag; the firmware will take care of
3553          * assigning DID=SID for outgoing pkts.
3554          */
3555         cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3556         cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3557         cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
3558                                                         BD_WRAP_BACK);
3559
3560         req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3561         cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3562         cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3563         cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3564
3565         vha->bidi_stats.transfer_bytes += req_data_len;
3566         vha->bidi_stats.io_count++;
3567
3568         vha->qla_stats.output_bytes += req_data_len;
3569         vha->qla_stats.output_requests++;
3570
3571         /* Only one DSD is available in the bidirectional IOCB; the remaining
3572          * DSDs are bundled in continuation IOCBs.
3573          */
3574         avail_dsds = 1;
3575         cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
3576
3577         index = 0;
3578
3579         for_each_sg(bsg_job->request_payload.sg_list, sg,
3580                                 bsg_job->request_payload.sg_cnt, index) {
3581                 dma_addr_t sle_dma;
3582                 cont_a64_entry_t *cont_pkt;
3583
3584                 /* Allocate additional continuation packets */
3585                 if (avail_dsds == 0) {
3586                         /* Continuation type 1 IOCB can accommodate
3587                          * 5 DSDS
3588                          */
3589                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3590                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3591                         avail_dsds = 5;
3592                         entry_count++;
3593                 }
3594                 sle_dma = sg_dma_address(sg);
3595                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
3596                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
3597                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
3598                 avail_dsds--;
3599         }
3600         /* For a read request the DSDs always go to the continuation IOCB
3601          * and follow the write DSDs. If there is room in the current IOCB
3602          * then they are added to that IOCB, else a new continuation IOCB is
3603          * allocated.
3604          */
3605         for_each_sg(bsg_job->reply_payload.sg_list, sg,
3606                                 bsg_job->reply_payload.sg_cnt, index) {
3607                 dma_addr_t sle_dma;
3608                 cont_a64_entry_t *cont_pkt;
3609
3610                 /* Allocate additional continuation packets */
3611                 if (avail_dsds == 0) {
3612                         /* Continuation type 1 IOCB can accommodate
3613                          * 5 DSDS
3614                          */
3615                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3616                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3617                         avail_dsds = 5;
3618                         entry_count++;
3619                 }
3620                 sle_dma = sg_dma_address(sg);
3621                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
3622                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
3623                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
3624                 avail_dsds--;
3625         }
3626         /* This value should be the same as the number of IOCBs required for this cmd */
3627         cmd_pkt->entry_count = entry_count;
3628 }
3629
3630 int
3631 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3632 {
3633
3634         struct qla_hw_data *ha = vha->hw;
3635         unsigned long flags;
3636         uint32_t handle;
3637         uint32_t index;
3638         uint16_t req_cnt;
3639         uint16_t cnt;
3640         uint32_t *clr_ptr;
3641         struct cmd_bidir *cmd_pkt = NULL;
3642         struct rsp_que *rsp;
3643         struct req_que *req;
3644         int rval = EXT_STATUS_OK;
3645
3648         rsp = ha->rsp_q_map[0];
3649         req = vha->req;
3650
3651         /* Send marker if required */
3652         if (vha->marker_needed != 0) {
3653                 if (qla2x00_marker(vha, req,
3654                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3655                         return EXT_STATUS_MAILBOX;
3656                 vha->marker_needed = 0;
3657         }
3658
3659         /* Acquire ring specific lock */
3660         spin_lock_irqsave(&ha->hardware_lock, flags);
3661
3662         /* Check for room in outstanding command list. */
3663         handle = req->current_outstanding_cmd;
3664         for (index = 1; index < req->num_outstanding_cmds; index++) {
3665                 handle++;
3666                 if (handle == req->num_outstanding_cmds)
3667                         handle = 1;
3668                 if (!req->outstanding_cmds[handle])
3669                         break;
3670         }
3671
3672         if (index == req->num_outstanding_cmds) {
3673                 rval = EXT_STATUS_BUSY;
3674                 goto queuing_error;
3675         }
3676
3677         /* Calculate the number of IOCBs required */
3678         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3679
3680         /* Check for room on request queue. */
3681         if (req->cnt < req_cnt + 2) {
3682                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3683                     RD_REG_DWORD_RELAXED(req->req_q_out);
3684                 if  (req->ring_index < cnt)
3685                         req->cnt = cnt - req->ring_index;
3686                 else
3687                         req->cnt = req->length -
3688                                 (req->ring_index - cnt);
3689         }
3690         if (req->cnt < req_cnt + 2) {
3691                 rval = EXT_STATUS_BUSY;
3692                 goto queuing_error;
3693         }
3694
3695         cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
3696         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3697
3698         /* Zero out remaining portion of packet. */
3699         /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3700         clr_ptr = (uint32_t *)cmd_pkt + 2;
3701         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3702
3703         /* Set NPORT-ID  (of vha)*/
3704         cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
3705         cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
3706         cmd_pkt->port_id[1] = vha->d_id.b.area;
3707         cmd_pkt->port_id[2] = vha->d_id.b.domain;
3708
3709         qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3710         cmd_pkt->entry_status = (uint8_t) rsp->id;
3711         /* Build command packet. */
3712         req->current_outstanding_cmd = handle;
3713         req->outstanding_cmds[handle] = sp;
3714         sp->handle = handle;
3715         req->cnt -= req_cnt;
3716
3717         /* Send the command to the firmware */
3718         wmb();
3719         qla2x00_start_iocbs(vha, req);
3720 queuing_error:
3721         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3722         return rval;
3723 }