/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}

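/*
 * Worked example: a 24-segment transfer fits 3 DSDs in the command IOCB
 * and needs (24 - 3) / 7 = 3 Continuation Type 0 IOCBs for the rest,
 * so qla2x00_calc_iocbs_32(24) == 4 ring entries.
 */
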
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}

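/*
 * Worked example: a 12-segment transfer fits 2 DSDs in the command IOCB
 * and needs (12 - 2) / 5 = 2 Continuation Type 1 IOCBs for the rest,
 * so qla2x00_calc_iocbs_64(12) == 3 ring entries.
 */
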
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
	    cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
	    cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}

inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t guard = scsi_host_get_guard(cmd->device->host);

	/* We always use DIF bundling for best performance. */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (guard & SHOST_DIX_GUARD_IP)
			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
		else
			*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}

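/*
 * In short: STRIP operations translate to PO_MODE_DIF_REMOVE, INSERT
 * operations to PO_MODE_DIF_INSERT, and PASS operations to
 * PO_MODE_DIF_PASS, unless the host guard is an IP checksum, in which
 * case PO_MODE_DIF_TCP_CKSUM asks the firmware to convert the guard.
 */
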
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

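/*
 * Example layout for a 12-segment command: 3 DSDs land in the Command
 * Type 2 IOCB itself, 7 in the first Continuation Type 0 IOCB, and the
 * remaining 2 in a second continuation entry.
 */
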
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	vha = sp->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
	cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

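/*
 * Note on the "req_cnt + 2" headroom test above: the driver keeps two
 * spare entries so the request ring is never driven completely full,
 * which avoids the ring_index == out-pointer ambiguity between an empty
 * and a totally full queue.
 */
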
/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);

	if (IS_P3P_TYPE(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA27XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
		} else if (IS_QLA83XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
		} else if (IS_QLAFX00(ha)) {
			WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
				req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: SCSI ID
 * @lun: SCSI LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
			struct rsp_que *rsp, uint16_t loop_id,
			uint64_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24 = NULL;

	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	req = ha->req_q_map[0];
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16((uint16_t)lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
		struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
		uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
					MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
					MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}

static int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
	uint16_t tot_dsds)
{
	uint32_t *cur_dsd = NULL;
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint32_t *dsd_seg;
	void *next_dsd;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 6 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return 0;
	}

	vha = sp->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	cur_seg = scsi_sglist(cmd);
	ctx = GET_CMD_CTX_SP(sp);

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
		} else {
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(dsd_list_len);
		}
		cur_dsd = (uint32_t *)next_dsd;
		while (avail_dsds) {
			dma_addr_t sle_dma;

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
	return 0;
}

/**
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds / QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}

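/*
 * Example, assuming QLA_DSDS_PER_IOCB is defined as 37 (its usual
 * value): an 80-descriptor command needs 80 / 37 = 2 full DSD lists
 * plus one more for the 6 left over, so qla24xx_calc_dsd_lists(80) == 3.
 */
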
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
	uint16_t tot_dsds, struct req_que *req)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask */
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
 *
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
							0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}
}

int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
	uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}

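/*
 * qla24xx_get_one_block_sg() is re-invoked until it returns 0. Each call
 * yields at most one protection-interval-sized chunk (*partial == 0) or
 * the tail of an SG element that stops short of a full interval
 * (*partial == 1), which lets the callers below emit one DSD per
 * protection block even when a block straddles SG elements.
 */
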
int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	uint32_t prot_int; /* protection interval */
	uint32_t partial;
	struct qla2_sgx sgx;
	dma_addr_t sle_dma;
	uint32_t sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	if (sp) {
		cmd = GET_CMD_SP(sp);
		prot_int = cmd->device->sector_size;

		sgx.tot_bytes = scsi_bufflen(cmd);
		sgx.cur_sg = scsi_sglist(cmd);
		sgx.sp = sp;

		sg_prot = scsi_prot_sglist(cmd);
	} else if (tc) {
		prot_int = tc->blk_sz;
		sgx.tot_bytes = tc->bufflen;
		sgx.cur_sg = tc->sg;
		sg_prot = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sle_dma_len);
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

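/*
 * DSD list sizing in the walkers above and below: each descriptor takes
 * 12 bytes (address low/high plus length), and the "+ 1" in
 * "(avail_dsds + 1) * 12" reserves one extra slot for the entry that
 * null-terminates the list or chains to the next one.
 */
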
int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
	uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	uint32_t *cur_dsd = dsd;
	int i;
	uint16_t used_dsds = tot_dsds;
	struct scsi_cmnd *cmd;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_sglist(cmd);
	} else if (tc) {
		sgl = tc->sg;
	} else {
		BUG();
		return 1;
	}

	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	int i;
	struct scsi_cmnd *cmd;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	struct scsi_qla_host *vha;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_prot_sglist(cmd);
		vha = sp->vha;
	} else if (tc) {
		vha = tc->vha;
		sgl = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe021,
		"%s: enter\n", __func__);

	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type CRC_2 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of protection segments
 * @fw_prot_opts: Protection options to pass to the firmware
 */
inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	uint32_t *cur_dsd, *fcp_dl;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t total_bytes = 0;
	uint32_t data_bytes;
	uint32_t dif_bytes;
	uint8_t bundling = 1;
	uint16_t blk_size;
	uint8_t *clr_ptr;
	struct crc_context *crc_ctx_pkt = NULL;
	struct qla_hw_data *ha;
	uint8_t additional_fcpcdb_len;
	uint16_t fcp_cmnd_len;
	struct fcp_cmnd *fcp_cmnd;
	dma_addr_t crc_ctx_dma;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);

	vha = sp->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->vha->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.ctx =
	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	fcp_cmnd->task_management = 0;
	fcp_cmnd->task_attribute = TSK_SIMPLE;

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		BUG();
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
			SCSI_PROT_DIF_TYPE2))
			fw_prot_opts |= BIT_10;
		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
		    SCSI_PROT_DIF_TYPE3)
			fw_prot_opts |= BIT_11;
	}

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
							tot_prot_dsds);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
			cur_dsd, tot_dsds, NULL))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
			(tot_dsds - tot_prot_dsds), NULL))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
				tot_prot_dsds, NULL))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}

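/*
 * fcp_dl above is the FCP DL (data length) field that immediately
 * follows the CDB in the FCP_CMND payload. It is big-endian on the wire
 * (hence htonl) and carries total_bytes, which includes the 8-byte DIF
 * tuples whenever protection data actually travels with the I/O.
 */
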
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	req = vha->req;
	rsp = req->rsp;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt = 0;
	uint16_t tot_dsds;
	uint16_t tot_prot_dsds;
	uint16_t fw_prot_opts = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct cmd_type_crc_2 *cmd_pkt;
	uint32_t status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla24xx_start_scsi(sp);
	}

	/* Setup device pointers. */
	req = vha->req;
	rsp = req->rsp;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
		QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	cmd_pkt->timeout = cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_FUNCTION_FAILED;
}

/**
 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
qla2xxx_start_scsi_mq(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Setup qpair pointers */
	rsp = qpair->rsp;
	req = qpair->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			spin_unlock_irqrestore(&qpair->qp_lock, flags);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_FUNCTION_FAILED;
}

/**
 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
qla2xxx_dif_start_scsi_mq(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt = 0;
	uint16_t tot_dsds;
	uint16_t tot_prot_dsds;
	uint16_t fw_prot_opts = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct cmd_type_crc_2 *cmd_pkt;
	uint32_t status = 0;
	struct qla_qpair *qpair = sp->qpair;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Check for host side state */
	if (!qpair->online) {
		cmd->result = DID_NO_CONNECT << 16;
		return QLA_INTERFACE_ERROR;
	}

	if (!qpair->difdix_supported &&
	    scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
		cmd->result = DID_NO_CONNECT << 16;
		return QLA_INTERFACE_ERROR;
	}

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla2xxx_start_scsi_mq(sp);
	}

	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Setup qpair pointers */
	rsp = qpair->rsp;
	req = qpair->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			spin_unlock_irqrestore(&qpair->qp_lock, flags);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
		QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	cmd_pkt->timeout = cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	spin_unlock_irqrestore(&qpair->qp_lock, flags);
	return QLA_FUNCTION_FAILED;
}

/* Generic Control-SRB manipulation functions. */

/* hardware_lock assumed to be held. */

void *
__qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
{
	scsi_qla_host_t *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = qpair->req;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
	uint32_t index, handle;
	request_t *pkt;
	uint16_t cnt, req_cnt;

	pkt = NULL;
	req_cnt = 1;
	handle = 0;

	if (sp && (sp->type != SRB_SCSI_CMD)) {
		/* Adjust entry-counts as needed. */
		req_cnt = sp->iocbs;
	}

	/* Check for room on request queue. */
	if (req->cnt < req_cnt + 2) {
		if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
		else if (IS_P3P_TYPE(ha))
			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
		else if (IS_QLAFX00(ha))
			cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < req_cnt + 2)
			goto queuing_error;
	}

	if (sp) {
		/* Check for room in outstanding command list. */
		handle = req->current_outstanding_cmd;
		for (index = 1; index < req->num_outstanding_cmds; index++) {
			handle++;
			if (handle == req->num_outstanding_cmds)
				handle = 1;
			if (!req->outstanding_cmds[handle])
				break;
		}
		if (index == req->num_outstanding_cmds) {
			ql_log(ql_log_warn, vha, 0x700b,
			    "No room on outstanding cmd array.\n");
			goto queuing_error;
		}

		/* Prep command array. */
		req->current_outstanding_cmd = handle;
		req->outstanding_cmds[handle] = sp;
		sp->handle = handle;
	}

	/* Prep packet */
	req->cnt -= req_cnt;
	pkt = req->ring_ptr;
	memset(pkt, 0, REQUEST_ENTRY_SIZE);
	if (IS_QLAFX00(ha)) {
		WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
		WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
	} else {
		pkt->entry_count = req_cnt;
		pkt->handle = handle;
	}

	return pkt;

queuing_error:
	qpair->tgt_counters.num_alloc_iocb_failed++;
	return pkt;
}

void *
qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
{
	scsi_qla_host_t *vha = qpair->vha;

	if (qla2x00_reset_active(vha))
		return NULL;

	return __qla2x00_alloc_iocbs(qpair, sp);
}

void *
qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
{
	return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
}

static void
qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
	if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI)
		logio->control_flags |= LCF_NVME_PRLI;

	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->vha->vp_idx;
}

static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);

	if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->vha->vp_idx;
}

static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->vha->hw;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	uint16_t opts;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(opts);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
	}
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
}

static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags =
	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
	if (!sp->fcport->se_sess ||
	    !sp->fcport->keep_nport_handle)
		logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->vha->vp_idx;
}

static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
	    cpu_to_le16(sp->fcport->loop_id) :
	    cpu_to_le16(sp->fcport->loop_id << 8);
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
	/* Implicit: mbx->mbx10 = 0. */
}

static void
qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->vp_index = sp->vha->vp_idx;
}

static void
qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(BIT_0);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
	}
	mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
	mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
	mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
	mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
}

2325 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2329 struct fc_port *fcport = sp->fcport;
2330 scsi_qla_host_t *vha = fcport->vha;
2331 struct qla_hw_data *ha = vha->hw;
2332 struct srb_iocb *iocb = &sp->u.iocb_cmd;
2333 struct req_que *req = vha->req;
2335 flags = iocb->u.tmf.flags;
2336 lun = iocb->u.tmf.lun;
2338 tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2339 tsk->entry_count = 1;
2340 tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2341 tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2342 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2343 tsk->control_flags = cpu_to_le32(flags);
2344 tsk->port_id[0] = fcport->d_id.b.al_pa;
2345 tsk->port_id[1] = fcport->d_id.b.area;
2346 tsk->port_id[2] = fcport->d_id.b.domain;
2347 tsk->vp_index = fcport->vha->vp_idx;
	if (flags == TCF_LUN_RESET) {
		int_to_scsilun(lun, &tsk->lun);
		host_to_fcp_swap((uint8_t *)&tsk->lun,
		    sizeof(tsk->lun));
	}
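
/**
 * qla2x00_els_dcmd_sp_free() - Release the resources held by an ELS
 * driver command SRB: the DMA-coherent LOGO payload and the timer.
 * @data: SRB being freed
 */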
2357 qla2x00_els_dcmd_sp_free(void *data)
{
	srb_t *sp = data;
	struct srb_iocb *elsio = &sp->u.iocb_cmd;
2364 if (elsio->u.els_logo.els_logo_pyld)
2365 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2366 elsio->u.els_logo.els_logo_pyld,
2367 elsio->u.els_logo.els_logo_pyld_dma);
2369 del_timer(&elsio->timer);
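
/**
 * qla2x00_els_dcmd_iocb_timeout() - Timeout handler for an ELS driver
 * command; logs the timeout and wakes the waiter.
 * @data: SRB that timed out
 */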
2374 qla2x00_els_dcmd_iocb_timeout(void *data)
{
	srb_t *sp = data;
	fc_port_t *fcport = sp->fcport;
2378 struct scsi_qla_host *vha = sp->vha;
2379 struct srb_iocb *lio = &sp->u.iocb_cmd;
2381 ql_dbg(ql_dbg_io, vha, 0x3069,
2382 "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2383 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2384 fcport->d_id.b.al_pa);
2386 complete(&lio->u.els_logo.comp);
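
/**
 * qla2x00_els_dcmd_sp_done() - Completion callback for an ELS driver
 * command; logs the completion and wakes the waiter.
 * @ptr: SRB that completed
 * @res: completion status
 */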
2390 qla2x00_els_dcmd_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	fc_port_t *fcport = sp->fcport;
2394 struct srb_iocb *lio = &sp->u.iocb_cmd;
2395 struct scsi_qla_host *vha = sp->vha;
2397 ql_dbg(ql_dbg_io, vha, 0x3072,
2398 "%s hdl=%x, portid=%02x%02x%02x done\n",
2399 sp->name, sp->handle, fcport->d_id.b.domain,
2400 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2402 complete(&lio->u.els_logo.comp);
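
/**
 * qla24xx_els_dcmd_iocb() - Allocate and issue a driver-generated ELS
 * command (a LOGO payload) to a remote port and wait for completion.
 * @vha: host adapter
 * @els_opcode: ELS command opcode to send
 * @remote_did: port ID of the remote port
 *
 * Returns QLA_SUCCESS on success and a failure code otherwise.
 */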
2406 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2407 port_id_t remote_did)
{
	srb_t *sp;
	fc_port_t *fcport = NULL;
2411 struct srb_iocb *elsio = NULL;
2412 struct qla_hw_data *ha = vha->hw;
2413 struct els_logo_payload logo_pyld;
2414 int rval = QLA_SUCCESS;
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
		return -ENOMEM;
	}
2422 /* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		kfree(fcport);
		ql_log(ql_log_info, vha, 0x70e6,
		    "SRB allocation failed\n");
		return -ENOMEM;
	}
2431 elsio = &sp->u.iocb_cmd;
2432 fcport->loop_id = 0xFFFF;
2433 fcport->d_id.b.domain = remote_did.b.domain;
2434 fcport->d_id.b.area = remote_did.b.area;
2435 fcport->d_id.b.al_pa = remote_did.b.al_pa;
2437 ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2438 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2440 sp->type = SRB_ELS_DCMD;
2441 sp->name = "ELS_DCMD";
2442 sp->fcport = fcport;
2443 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2444 elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2445 sp->done = qla2x00_els_dcmd_sp_done;
2446 sp->free = qla2x00_els_dcmd_sp_free;
	elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
	    DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
	    GFP_KERNEL);

	if (!elsio->u.els_logo.els_logo_pyld) {
		sp->free(sp);
		return QLA_FUNCTION_FAILED;
	}
2457 memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2459 elsio->u.els_logo.els_cmd = els_opcode;
2460 logo_pyld.opcode = els_opcode;
2461 logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2462 logo_pyld.s_id[1] = vha->d_id.b.area;
2463 logo_pyld.s_id[2] = vha->d_id.b.domain;
2464 host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2465 memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2467 memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2468 sizeof(struct els_logo_payload));
2470 rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		sp->free(sp);
		return QLA_FUNCTION_FAILED;
	}
2476 ql_dbg(ql_dbg_io, vha, 0x3074,
2477 "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2478 sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2479 fcport->d_id.b.area, fcport->d_id.b.al_pa);
	wait_for_completion(&elsio->u.els_logo.comp);

	sp->free(sp);
	return rval;
}
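
/**
 * qla24xx_els_logo_iocb() - Populate an ISP24xx ELS IOCB with the
 * driver-built LOGO payload prepared by qla24xx_els_dcmd_iocb().
 * @sp: SRB for the ELS request
 * @els_iocb: ELS IOCB packet to populate
 */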
2488 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2490 scsi_qla_host_t *vha = sp->vha;
2491 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2493 els_iocb->entry_type = ELS_IOCB_TYPE;
2494 els_iocb->entry_count = 1;
2495 els_iocb->sys_define = 0;
2496 els_iocb->entry_status = 0;
2497 els_iocb->handle = sp->handle;
2498 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2499 els_iocb->tx_dsd_count = 1;
2500 els_iocb->vp_index = vha->vp_idx;
2501 els_iocb->sof_type = EST_SOFI3;
2502 els_iocb->rx_dsd_count = 0;
2503 els_iocb->opcode = elsio->u.els_logo.els_cmd;
2505 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2506 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2507 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2508 els_iocb->control_flags = 0;
2510 els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
2511 els_iocb->tx_address[0] =
2512 cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
2513 els_iocb->tx_address[1] =
2514 cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
2515 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2517 els_iocb->rx_byte_count = 0;
2518 els_iocb->rx_address[0] = 0;
2519 els_iocb->rx_address[1] = 0;
2520 els_iocb->rx_len = 0;
2522 sp->vha->qla_stats.control_requests++;
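
/**
 * qla24xx_els_iocb() - Build an ISP24xx ELS pass-through IOCB from a
 * bsg_job request.
 * @sp: SRB wrapping the bsg_job
 * @els_iocb: ELS IOCB packet to populate
 */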
2526 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2528 struct bsg_job *bsg_job = sp->u.bsg_job;
2529 struct fc_bsg_request *bsg_request = bsg_job->request;
2531 els_iocb->entry_type = ELS_IOCB_TYPE;
2532 els_iocb->entry_count = 1;
2533 els_iocb->sys_define = 0;
2534 els_iocb->entry_status = 0;
2535 els_iocb->handle = sp->handle;
2536 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2537 els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2538 els_iocb->vp_index = sp->vha->vp_idx;
2539 els_iocb->sof_type = EST_SOFI3;
2540 els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	els_iocb->opcode =
	    sp->type == SRB_ELS_CMD_RPT ?
	    bsg_request->rqst_data.r_els.els_code :
	    bsg_request->rqst_data.h_els.command_code;
2546 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2547 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2548 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2549 els_iocb->control_flags = 0;
2550 els_iocb->rx_byte_count =
2551 cpu_to_le32(bsg_job->reply_payload.payload_len);
2552 els_iocb->tx_byte_count =
2553 cpu_to_le32(bsg_job->request_payload.payload_len);
2555 els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2556 (bsg_job->request_payload.sg_list)));
2557 els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2558 (bsg_job->request_payload.sg_list)));
2559 els_iocb->tx_len = cpu_to_le32(sg_dma_len
2560 (bsg_job->request_payload.sg_list));
2562 els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2563 (bsg_job->reply_payload.sg_list)));
2564 els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2565 (bsg_job->reply_payload.sg_list)));
2566 els_iocb->rx_len = cpu_to_le32(sg_dma_len
2567 (bsg_job->reply_payload.sg_list));
2569 sp->vha->qla_stats.control_requests++;
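
/**
 * qla2x00_ct_iocb() - Build a legacy MS (CT pass-through) IOCB from a
 * bsg_job request.
 * @sp: SRB wrapping the bsg_job
 * @ct_iocb: MS IOCB packet to populate
 */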
2573 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	int loop_iteration = 0;
	int entry_count = 1;
2586 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2587 ct_iocb->entry_type = CT_IOCB_TYPE;
2588 ct_iocb->entry_status = 0;
2589 ct_iocb->handle1 = sp->handle;
2590 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2591 ct_iocb->status = cpu_to_le16(0);
2592 ct_iocb->control_flags = cpu_to_le16(0);
2593 ct_iocb->timeout = 0;
2594 ct_iocb->cmd_dsd_count =
2595 cpu_to_le16(bsg_job->request_payload.sg_cnt);
2596 ct_iocb->total_dsd_count =
2597 cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2598 ct_iocb->req_bytecount =
2599 cpu_to_le32(bsg_job->request_payload.payload_len);
2600 ct_iocb->rsp_bytecount =
2601 cpu_to_le32(bsg_job->reply_payload.payload_len);
2603 ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2604 (bsg_job->request_payload.sg_list)));
2605 ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2606 (bsg_job->request_payload.sg_list)));
2607 ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2609 ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2610 (bsg_job->reply_payload.sg_list)));
2611 ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2612 (bsg_job->reply_payload.sg_list)));
2613 ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
	index = 0;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/* Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
			    vha->hw->req_q_map[0]);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iteration++;
		avail_dsds--;
	}
2644 ct_iocb->entry_count = entry_count;
2646 sp->vha->qla_stats.control_requests++;
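
/**
 * qla24xx_ct_iocb() - Build an ISP24xx CT pass-through IOCB from a
 * bsg_job request.
 * @sp: SRB wrapping the bsg_job
 * @ct_iocb: CT IOCB packet to populate
 */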
2650 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t cmd_dsds, rsp_dsds;
2657 scsi_qla_host_t *vha = sp->vha;
2658 struct qla_hw_data *ha = vha->hw;
2659 struct bsg_job *bsg_job = sp->u.bsg_job;
2660 int entry_count = 1;
2661 cont_a64_entry_t *cont_pkt = NULL;
2663 ct_iocb->entry_type = CT_IOCB_TYPE;
2664 ct_iocb->entry_status = 0;
2665 ct_iocb->sys_define = 0;
2666 ct_iocb->handle = sp->handle;
2668 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2669 ct_iocb->vp_index = sp->vha->vp_idx;
2670 ct_iocb->comp_status = cpu_to_le16(0);
2672 cmd_dsds = bsg_job->request_payload.sg_cnt;
2673 rsp_dsds = bsg_job->reply_payload.sg_cnt;
2675 ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
2676 ct_iocb->timeout = 0;
2677 ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
2678 ct_iocb->cmd_byte_count =
2679 cpu_to_le32(bsg_job->request_payload.payload_len);
	avail_dsds = 2;
	cur_dsd = (uint32_t *)ct_iocb->dseg_0_address;

	for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/* Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(
			    vha, ha->req_q_map[0]);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/* Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
			    ha->req_q_map[0]);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
2732 ct_iocb->entry_count = entry_count;
2736 * qla82xx_start_scsi() - Send a SCSI command to the ISP
2737 * @sp: command to send to the ISP
2739 * Returns non-zero if a failure occurred, else zero.
2742 qla82xx_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_82xx __iomem *reg;
	uint32_t dbval;
	uint32_t *fcp_dl;
	uint8_t additional_cdb_len;
	struct ct6_dsd *ctx;
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
2763 /* Setup device pointers. */
2764 reg = &ha->iobase->isp82;
2765 cmd = GET_CMD_SP(sp);
	req = vha->req;
	rsp = ha->rsp_q_map[0];

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	dbval = 0x04 | (ha->portnum << 5);
2774 /* Send marker if required */
2775 if (vha->marker_needed != 0) {
2776 if (qla2x00_marker(vha, req,
2777 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2778 ql_log(ql_log_warn, vha, 0x300c,
2779 "qla2x00_marker failed for cmd=%p.\n", cmd);
2780 return QLA_FUNCTION_FAILED;
2782 vha->marker_needed = 0;
2785 /* Acquire ring specific lock */
2786 spin_lock_irqsave(&ha->hardware_lock, flags);
2788 /* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;
2800 /* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
2811 if (tot_dsds > ql2xshiftctondsd) {
2812 struct cmd_type_6 *cmd_pkt;
2813 uint16_t more_dsd_lists = 0;
		struct dsd_dma *dsd_ptr;
		uint16_t i;
2817 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
2818 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
			ql_dbg(ql_dbg_io, vha, 0x300d,
			    "Num of DSD list %d is more than %d for cmd=%p.\n",
			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
			    cmd);
			goto queuing_error;
		}
2826 if (more_dsd_lists <= ha->gbl_dsd_avail)
2827 goto sufficient_dsds;
2829 more_dsd_lists -= ha->gbl_dsd_avail;
		for (i = 0; i < more_dsd_lists; i++) {
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr) {
				ql_log(ql_log_fatal, vha, 0x300e,
				    "Failed to allocate memory for dsd_dma "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}
2840 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2841 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
			if (!dsd_ptr->dsd_addr) {
				kfree(dsd_ptr);
				ql_log(ql_log_fatal, vha, 0x300f,
				    "Failed to allocate memory for dsd_addr "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}
2849 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2850 ha->gbl_dsd_avail++;
		}

sufficient_dsds:
		req_cnt = 1;

		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
			if (req->cnt < (req_cnt + 2))
				goto queuing_error;
		}
		ctx = sp->u.scmd.ctx =
		    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
		if (!ctx) {
			ql_log(ql_log_fatal, vha, 0x3010,
			    "Failed to allocate ctx for cmd=%p.\n", cmd);
			goto queuing_error;
		}
2876 memset(ctx, 0, sizeof(struct ct6_dsd));
2877 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2878 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
		if (!ctx->fcp_cmnd) {
			ql_log(ql_log_fatal, vha, 0x3011,
			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
			goto queuing_error;
		}
2885 /* Initialize the DSD list and dma handle */
2886 INIT_LIST_HEAD(&ctx->dsd_list);
2887 ctx->dsd_use_cnt = 0;
2889 if (cmd->cmd_len > 16) {
2890 additional_cdb_len = cmd->cmd_len - 16;
			if ((cmd->cmd_len % 4) != 0) {
				/* SCSI command bigger than 16 bytes must be
				 * a multiple of 4.
				 */
				ql_log(ql_log_warn, vha, 0x3012,
				    "scsi cmd len %d not multiple of 4 "
				    "for cmd=%p.\n", cmd->cmd_len, cmd);
				goto queuing_error_fcp_cmnd;
			}
			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
		} else {
			additional_cdb_len = 0;
			ctx->fcp_cmnd_len = 12 + 16 + 4;
		}
2906 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2907 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2909 /* Zero out remaining portion of packet. */
2910 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
2911 clr_ptr = (uint32_t *)cmd_pkt + 2;
2912 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2913 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2915 /* Set NPORT-ID and LUN number*/
2916 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2917 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2918 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2919 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2920 cmd_pkt->vp_index = sp->vha->vp_idx;
2922 /* Build IOCB segments */
2923 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2924 goto queuing_error_fcp_cmnd;
2926 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2927 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2929 /* build FCP_CMND IU */
2930 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2931 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
2932 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2934 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2935 ctx->fcp_cmnd->additional_cdb_len |= 1;
2936 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2937 ctx->fcp_cmnd->additional_cdb_len |= 2;
2939 /* Populate the FCP_PRIO. */
2940 if (ha->flags.fcp_prio_enabled)
2941 ctx->fcp_cmnd->task_attribute |=
2942 sp->fcport->fcp_prio << 3;
2944 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2946 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2947 additional_cdb_len);
2948 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2950 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2951 cmd_pkt->fcp_cmnd_dseg_address[0] =
2952 cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2953 cmd_pkt->fcp_cmnd_dseg_address[1] =
2954 cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2956 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2957 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2958 /* Set total data segment count. */
2959 cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify response queue number where
		 * completion should happen.
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	} else {
		struct cmd_type_7 *cmd_pkt;
2966 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
2979 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2980 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2982 /* Zero out remaining portion of packet. */
2983 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2984 clr_ptr = (uint32_t *)cmd_pkt + 2;
2985 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2986 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2988 /* Set NPORT-ID and LUN number*/
2989 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2990 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2991 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2992 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2993 cmd_pkt->vp_index = sp->vha->vp_idx;
2995 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2996 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2997 sizeof(cmd_pkt->lun));
2999 /* Populate the FCP_PRIO. */
3000 if (ha->flags.fcp_prio_enabled)
3001 cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3003 /* Load SCSI command packet. */
3004 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3005 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3007 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3009 /* Build IOCB segments */
3010 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3012 /* Set total data segment count. */
3013 cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify response queue number where
		 * completion should happen.
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	}
3020 /* Build command packet. */
3021 req->current_outstanding_cmd = handle;
3022 req->outstanding_cmds[handle] = sp;
3023 sp->handle = handle;
3024 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3025 req->cnt -= req_cnt;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;
3036 sp->flags |= SRB_DMA_VALID;
3038 /* Set chip new ring index. */
3039 /* write, read and verify logic */
3040 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
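	/* Doorbell layout, as implied by the shifts above: bits 7:0 hold
	 * the doorbell type and port number, bits 15:8 the request queue
	 * id, and bits 31:16 the new ring index.
	 */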
	if (ql2xdbwr)
		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
	else {
		WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
		wmb();
		while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
			WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
			wmb();
		}
	}
3052 /* Manage unprocessed RIO/ZIO commands in response queue. */
3053 if (vha->flags.process_response_queue &&
3054 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3055 qla24xx_process_response_queue(vha, rsp);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;
3060 queuing_error_fcp_cmnd:
3061 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);
	if (sp->u.scmd.ctx) {
		mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
		sp->u.scmd.ctx = NULL;
	}
3070 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3072 return QLA_FUNCTION_FAILED;
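
/**
 * qla24xx_abort_iocb() - Build an ISP24xx abort IOCB for the handle
 * recorded in the SRB's abort context.
 * @sp: SRB for the abort request
 * @abt_iocb: abort IOCB packet to populate
 */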
3076 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3078 struct srb_iocb *aio = &sp->u.iocb_cmd;
3079 scsi_qla_host_t *vha = sp->vha;
3080 struct req_que *req = vha->req;
3082 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3083 abt_iocb->entry_type = ABORT_IOCB_TYPE;
3084 abt_iocb->entry_count = 1;
3085 abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3086 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3087 abt_iocb->handle_to_abort =
3088 cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
3089 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3090 abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3091 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3092 abt_iocb->vp_index = vha->vp_idx;
3093 abt_iocb->req_que_no = cpu_to_le16(req->id);
	/* Send the command to the firmware */
	wmb();
}
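
/**
 * qla2x00_mb_iocb() - Build a mailbox-passthrough IOCB from the out_mb
 * array stored in the SRB.
 * @sp: SRB for the mailbox request
 * @mbx: mailbox IOCB packet to populate
 */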
3099 qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
{
	int i, sz;

	mbx->entry_type = MBX_IOCB_TYPE;
3104 mbx->handle = sp->handle;
3105 sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3107 for (i = 0; i < sz; i++)
3108 mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
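
/**
 * qla2x00_ctpthru_cmd_iocb() - Build a CT pass-through IOCB from the
 * SRB's CT target context.
 * @sp: SRB for the CT request
 * @ct_pkt: CT IOCB packet to populate
 */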
3112 qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3114 sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3115 qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3116 ct_pkt->handle = sp->handle;
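
/**
 * qla2x00_send_notify_ack_iocb() - Build a Notify Acknowledge IOCB
 * echoing the fields of the immediate notify it answers.
 * @sp: SRB for the notify-ack request
 * @nack: notify-ack IOCB packet to populate
 */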
3119 static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3120 struct nack_to_isp *nack)
3122 struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3124 nack->entry_type = NOTIFY_ACK_TYPE;
3125 nack->entry_count = 1;
3126 nack->ox_id = ntfy->ox_id;
3128 nack->u.isp24.handle = sp->handle;
3129 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3130 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3131 nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
3134 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3135 nack->u.isp24.status = ntfy->u.isp24.status;
3136 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3137 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3138 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3139 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3140 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3141 nack->u.isp24.srr_flags = 0;
3142 nack->u.isp24.srr_reject_code = 0;
3143 nack->u.isp24.srr_reject_code_expl = 0;
3144 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
/*
 * Build NVME LS request
 */
3151 qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3153 struct srb_iocb *nvme;
3154 int rval = QLA_SUCCESS;
3156 nvme = &sp->u.iocb_cmd;
3157 cmd_pkt->entry_type = PT_LS4_REQUEST;
3158 cmd_pkt->entry_count = 1;
3159 cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
3161 cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3162 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3163 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3165 cmd_pkt->tx_dseg_count = 1;
3166 cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
3167 cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
3168 cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
3169 cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));
3171 cmd_pkt->rx_dseg_count = 1;
3172 cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
3173 cmd_pkt->dseg1_len = nvme->u.nvme.rsp_len;
3174 cmd_pkt->dseg1_address[0] = cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
	cmd_pkt->dseg1_address[1] = cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));

	return rval;
}
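
/**
 * qla2x00_start_sp() - Allocate an IOCB for the SRB, dispatch to the
 * type-specific IOCB builder, and ring the request queue.
 * @sp: SRB to issue
 *
 * Returns QLA_SUCCESS on success, QLA_FUNCTION_FAILED if no IOCB
 * space is available.
 */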
3181 qla2x00_start_sp(srb_t *sp)
{
	int rval;
	scsi_qla_host_t *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	void *pkt;
	unsigned long flags;
3189 rval = QLA_FUNCTION_FAILED;
3190 spin_lock_irqsave(&ha->hardware_lock, flags);
	pkt = qla2x00_alloc_iocbs(vha, sp);
	if (!pkt) {
		ql_log(ql_log_warn, vha, 0x700c,
		    "qla2x00_alloc_iocbs failed.\n");
		goto done;
	}

	rval = QLA_SUCCESS;
	switch (sp->type) {
	case SRB_LOGIN_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
		break;
	case SRB_PRLI_CMD:
		qla24xx_prli_iocb(sp, pkt);
		break;
	case SRB_LOGOUT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
		break;
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		qla24xx_els_iocb(sp, pkt);
		break;
	case SRB_CT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_ct_iocb(sp, pkt) :
		    qla2x00_ct_iocb(sp, pkt);
		break;
	case SRB_ADISC_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_adisc_iocb(sp, pkt) :
		    qla2x00_adisc_iocb(sp, pkt);
		break;
	case SRB_TM_CMD:
		IS_QLAFX00(ha) ?
		    qlafx00_tm_iocb(sp, pkt) :
		    qla24xx_tm_iocb(sp, pkt);
		break;
	case SRB_FXIOCB_DCMD:
	case SRB_FXIOCB_BCMD:
		qlafx00_fxdisc_iocb(sp, pkt);
		break;
	case SRB_NVME_LS:
		qla_nvme_ls(sp, pkt);
		break;
	case SRB_ABT_CMD:
		IS_QLAFX00(ha) ?
		    qlafx00_abort_iocb(sp, pkt) :
		    qla24xx_abort_iocb(sp, pkt);
		break;
	case SRB_ELS_DCMD:
		qla24xx_els_logo_iocb(sp, pkt);
		break;
	case SRB_CT_PTHRU_CMD:
		qla2x00_ctpthru_cmd_iocb(sp, pkt);
		break;
	case SRB_MB_IOCB:
		qla2x00_mb_iocb(sp, pkt);
		break;
	case SRB_NACK_PLOGI:
	case SRB_NACK_PRLI:
	case SRB_NACK_LOGO:
		qla2x00_send_notify_ack_iocb(sp, pkt);
		break;
	default:
		break;
	}

	wmb();
	qla2x00_start_iocbs(vha, ha->req_q_map[0]);
done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}
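
/**
 * qla25xx_build_bidir_iocb() - Populate a bidirectional command IOCB
 * and its continuation IOCBs from a bsg_job's request and reply
 * scatterlists.
 * @sp: SRB wrapping the bsg_job
 * @vha: host adapter
 * @cmd_pkt: bidirectional command IOCB to populate
 * @tot_dsds: total number of data segment descriptors
 */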
3270 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3271 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	uint32_t req_data_len = 0;
	uint32_t rsp_data_len = 0;
	struct scatterlist *sg;
	int index;
	int entry_count = 1;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	/* Update entry type to indicate bidir command */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    cpu_to_le32(COMMAND_BIDIRECTIONAL);

	/* Set the transfer direction, in this case set both flags.
	 * Also set the BD_WRAP_BACK flag; the firmware will take care
	 * of assigning DID=SID for outgoing pkts.
	 */
3290 cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3291 cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
	    BD_WRAP_BACK);
3295 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3296 cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3297 cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3298 cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3300 vha->bidi_stats.transfer_bytes += req_data_len;
3301 vha->bidi_stats.io_count++;
3303 vha->qla_stats.output_bytes += req_data_len;
3304 vha->qla_stats.output_requests++;
	/* Only one DSD is available for a bidirectional IOCB; any
	 * remaining DSDs are bundled in continuation IOCBs.
	 */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
	index = 0;

	for_each_sg(bsg_job->request_payload.sg_list, sg,
	    bsg_job->request_payload.sg_cnt, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* Continuation Type 1 IOCBs can accommodate
			 * five DSDs each.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* For a read request the DSDs always go to a continuation IOCB
	 * and follow the write DSDs. If there is room on the current
	 * IOCB they are added there; otherwise a new continuation IOCB
	 * is allocated.
	 */
	for_each_sg(bsg_job->reply_payload.sg_list, sg,
	    bsg_job->reply_payload.sg_cnt, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* Continuation Type 1 IOCBs can accommodate
			 * five DSDs each.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* This value should be the same as the number of IOCBs required
	 * for this command.
	 */
	cmd_pkt->entry_count = entry_count;
}
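
/**
 * qla2x00_start_bidir() - Queue a bidirectional command to the ISP.
 * @sp: SRB wrapping the bsg_job
 * @vha: host adapter
 * @tot_dsds: total number of data segment descriptors
 *
 * Returns EXT_STATUS_OK on success, else an EXT_STATUS_* error code.
 */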
3366 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	uint32_t handle;
	uint32_t index;
	uint16_t req_cnt;
	uint16_t cnt;
	uint32_t *clr_ptr;
	struct cmd_bidir *cmd_pkt = NULL;
	struct rsp_que *rsp;
	struct req_que *req;
	int rval = EXT_STATUS_OK;
	rsp = ha->rsp_q_map[0];
	req = vha->req;
3386 /* Send marker if required */
3387 if (vha->marker_needed != 0) {
3388 if (qla2x00_marker(vha, req,
3389 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3390 return EXT_STATUS_MAILBOX;
3391 vha->marker_needed = 0;
3394 /* Acquire ring specific lock */
3395 spin_lock_irqsave(&ha->hardware_lock, flags);
3397 /* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}

	/* Calculate number of IOCB required */
3413 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3415 /* Check for room on request queue. */
3416 if (req->cnt < req_cnt + 2) {
3417 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3418 RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt + 2) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}
3430 cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
3431 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3433 /* Zero out remaining portion of packet. */
3434 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3435 clr_ptr = (uint32_t *)cmd_pkt + 2;
3436 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3438 /* Set NPORT-ID (of vha)*/
3439 cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
3440 cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
3441 cmd_pkt->port_id[1] = vha->d_id.b.area;
3442 cmd_pkt->port_id[2] = vha->d_id.b.domain;
3444 qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3445 cmd_pkt->entry_status = (uint8_t) rsp->id;
3446 /* Build command packet. */
3447 req->current_outstanding_cmd = handle;
3448 req->outstanding_cmds[handle] = sp;
3449 sp->handle = handle;
3450 req->cnt -= req_cnt;
	/* Send the command to the firmware */
	wmb();
	qla2x00_start_iocbs(vha, req);
queuing_error:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}