2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
18 #include <linux/mempool.h>
19 #include <linux/errno.h>
20 #include <linux/init.h>
21 #include <linux/workqueue.h>
22 #include <linux/pci.h>
23 #include <linux/scatterlist.h>
24 #include <linux/skbuff.h>
25 #include <linux/spinlock.h>
26 #include <linux/if_ether.h>
27 #include <linux/if_vlan.h>
28 #include <linux/delay.h>
29 #include <linux/gfp.h>
30 #include <scsi/scsi.h>
31 #include <scsi/scsi_host.h>
32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_cmnd.h>
34 #include <scsi/scsi_tcq.h>
35 #include <scsi/fc/fc_els.h>
36 #include <scsi/fc/fc_fcoe.h>
37 #include <scsi/libfc.h>
38 #include <scsi/fc_frame.h>
42 const char *fnic_state_str[] = {
43 [FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE",
44 [FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
45 [FNIC_IN_ETH_MODE] = "FNIC_IN_ETH_MODE",
46 [FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
49 static const char *fnic_ioreq_state_str[] = {
50 [FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
51 [FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
52 [FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
53 [FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
54 [FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
57 static const char *fcpio_status_str[] = {
58 [FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
59 [FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
60 [FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
[FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM",
62 [FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
63 [FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
64 [FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
65 [FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
66 [FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
67 [FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
68 [FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
69 [FCPIO_FW_ERR] = "FCPIO_FW_ERR",
70 [FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
71 [FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
72 [FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
73 [FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
74 [FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
75 [FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
[FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNMAP_CHNG_PEND",
79 const char *fnic_state_to_str(unsigned int state)
81 if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
84 return fnic_state_str[state];
87 static const char *fnic_ioreq_state_to_str(unsigned int state)
89 if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
90 !fnic_ioreq_state_str[state])
93 return fnic_ioreq_state_str[state];
96 static const char *fnic_fcpio_status_to_str(unsigned int status)
98 if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
101 return fcpio_status_str[status];
104 static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);
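/*
 * Per-IO state is protected by a small array of spinlocks rather than a
 * single host-wide lock: the SCSI command tag is hashed into one of
 * FNIC_IO_LOCKS buckets with the (FNIC_IO_LOCKS - 1) mask, so commands
 * whose tags land in different buckets can be processed concurrently.
 */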
106 static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
107 struct scsi_cmnd *sc)
109 u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1);
111 return &fnic->io_req_lock[hash];
114 static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic,
117 return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)];
121 * Unmap the data buffer and sense buffer for an io_req,
122 * also unmap and free the device-private scatter/gather list.
124 static void fnic_release_ioreq_buf(struct fnic *fnic,
125 struct fnic_io_req *io_req,
126 struct scsi_cmnd *sc)
128 if (io_req->sgl_list_pa)
129 pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
130 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
135 mempool_free(io_req->sgl_list_alloc,
136 fnic->io_sgl_pool[io_req->sgl_type]);
137 if (io_req->sense_buf_pa)
138 pci_unmap_single(fnic->pdev, io_req->sense_buf_pa,
139 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
142 /* Free up Copy Wq descriptors. Called with copy_wq lock held */
143 static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
145 /* if no Ack received from firmware, then nothing to clean */
146 if (!fnic->fw_ack_recd[0])
150 * Update desc_available count based on number of freed descriptors
151 * Account for wraparound
153 if (wq->to_clean_index <= fnic->fw_ack_index[0])
154 wq->ring.desc_avail += (fnic->fw_ack_index[0]
155 - wq->to_clean_index + 1);
157 wq->ring.desc_avail += (wq->ring.desc_count
159 + fnic->fw_ack_index[0] + 1);
* Just bump the clean index to ack_index + 1, accounting for wraparound;
* this effectively frees all descriptors between to_clean_index and
* fw_ack_index, both inclusive.
167 (fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;
169 /* we have processed the acks received so far */
170 fnic->fw_ack_recd[0] = 0;
176 * __fnic_set_state_flags
177 * Sets/Clears bits in fnic's state_flags
180 __fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
181 unsigned long clearbits)
183 struct Scsi_Host *host = fnic->lport->host;
184 int sh_locked = spin_is_locked(host->host_lock);
185 unsigned long flags = 0;
188 spin_lock_irqsave(host->host_lock, flags);
191 fnic->state_flags &= ~st_flags;
193 fnic->state_flags |= st_flags;
196 spin_unlock_irqrestore(host->host_lock, flags);
203 * fnic_fw_reset_handler
204 * Routine to send reset msg to fw
206 int fnic_fw_reset_handler(struct fnic *fnic)
208 struct vnic_wq_copy *wq = &fnic->wq_copy[0];
212 /* indicate fwreset to io path */
213 fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);
215 skb_queue_purge(&fnic->frame_queue);
216 skb_queue_purge(&fnic->tx_queue);
218 /* wait for io cmpl */
219 while (atomic_read(&fnic->in_flight))
220 schedule_timeout(msecs_to_jiffies(1));
222 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
224 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
225 free_wq_copy_descs(fnic, wq);
227 if (!vnic_wq_copy_desc_avail(wq))
230 fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
231 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
232 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
233 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
234 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
236 &fnic->fnic_stats.fw_stats.active_fw_reqs));
239 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
242 atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
243 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
244 "Issued fw reset\n");
246 fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
247 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
248 "Failed to issue fw reset\n");
256 * fnic_flogi_reg_handler
257 * Routine to send flogi register msg to fw
259 int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
261 struct vnic_wq_copy *wq = &fnic->wq_copy[0];
262 enum fcpio_flogi_reg_format_type format;
263 struct fc_lport *lp = fnic->lport;
268 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
270 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
271 free_wq_copy_descs(fnic, wq);
273 if (!vnic_wq_copy_desc_avail(wq)) {
275 goto flogi_reg_ioreq_end;
278 if (fnic->ctlr.map_dest) {
279 memset(gw_mac, 0xff, ETH_ALEN);
280 format = FCPIO_FLOGI_REG_DEF_DEST;
282 memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
283 format = FCPIO_FLOGI_REG_GW_DEST;
286 if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) {
287 fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
290 lp->r_a_tov, lp->e_d_tov);
291 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
292 "FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
293 fc_id, fnic->data_src_addr, gw_mac);
295 fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
296 format, fc_id, gw_mac);
297 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
298 "FLOGI reg issued fcid %x map %d dest %pM\n",
299 fc_id, fnic->ctlr.map_dest, gw_mac);
302 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
303 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
304 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
305 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
306 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
309 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
314 * fnic_queue_wq_copy_desc
315 * Routine to enqueue a wq copy desc
317 static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
318 struct vnic_wq_copy *wq,
319 struct fnic_io_req *io_req,
320 struct scsi_cmnd *sc,
323 struct scatterlist *sg;
324 struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
325 struct fc_rport_libfc_priv *rp = rport->dd_data;
326 struct host_sg_desc *desc;
327 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
329 unsigned long intr_flags;
332 struct scsi_lun fc_lun;
336 /* For each SGE, create a device desc entry */
337 desc = io_req->sgl_list;
338 for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
339 desc->addr = cpu_to_le64(sg_dma_address(sg));
340 desc->len = cpu_to_le32(sg_dma_len(sg));
345 io_req->sgl_list_pa = pci_map_single
348 sizeof(io_req->sgl_list[0]) * sg_count,
351 r = pci_dma_mapping_error(fnic->pdev, io_req->sgl_list_pa);
353 printk(KERN_ERR "PCI mapping failed with error %d\n", r);
354 return SCSI_MLQUEUE_HOST_BUSY;
358 io_req->sense_buf_pa = pci_map_single(fnic->pdev,
360 SCSI_SENSE_BUFFERSIZE,
363 r = pci_dma_mapping_error(fnic->pdev, io_req->sense_buf_pa);
365 pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
366 sizeof(io_req->sgl_list[0]) * sg_count,
368 printk(KERN_ERR "PCI mapping failed with error %d\n", r);
369 return SCSI_MLQUEUE_HOST_BUSY;
372 int_to_scsilun(sc->device->lun, &fc_lun);
374 /* Enqueue the descriptor in the Copy WQ */
375 spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
377 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
378 free_wq_copy_descs(fnic, wq);
380 if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
381 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
382 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
383 "fnic_queue_wq_copy_desc failure - no descriptors\n");
384 atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
385 return SCSI_MLQUEUE_HOST_BUSY;
389 if (sc->sc_data_direction == DMA_FROM_DEVICE)
390 flags = FCPIO_ICMND_RDDATA;
391 else if (sc->sc_data_direction == DMA_TO_DEVICE)
392 flags = FCPIO_ICMND_WRDATA;
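/*
 * Ask the firmware to retry the exchange only when the adapter is
 * configured for FCP sequence level error recovery and the remote port
 * advertises retry support (FC_RP_FLAGS_RETRY).
 */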
395 if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
396 (rp->flags & FC_RP_FLAGS_RETRY))
397 exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;
399 fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag,
400 0, exch_flags, io_req->sgl_cnt,
401 SCSI_SENSE_BUFFERSIZE,
403 io_req->sense_buf_pa,
404 0, /* scsi cmd ref, always 0 */
405 FCPIO_ICMND_PTA_SIMPLE,
406 /* scsi pri and tag */
407 flags, /* command flags */
408 sc->cmnd, sc->cmd_len,
410 fc_lun.scsi_lun, io_req->port_id,
411 rport->maxframe_size, rp->r_a_tov,
414 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
415 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
416 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
417 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
418 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
420 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
426 * Routine to send a scsi cdb
427 * Called with host_lock held and interrupts disabled.
429 static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
431 struct fc_lport *lp = shost_priv(sc->device->host);
432 struct fc_rport *rport;
433 struct fnic_io_req *io_req = NULL;
434 struct fnic *fnic = lport_priv(lp);
435 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
436 struct vnic_wq_copy *wq;
440 unsigned long flags = 0;
442 spinlock_t *io_lock = NULL;
443 int io_lock_acquired = 0;
444 struct fc_rport_libfc_priv *rp;
446 if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
447 return SCSI_MLQUEUE_HOST_BUSY;
449 if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET)))
450 return SCSI_MLQUEUE_HOST_BUSY;
452 rport = starget_to_rport(scsi_target(sc->device));
454 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
455 "returning DID_NO_CONNECT for IO as rport is NULL\n");
456 sc->result = DID_NO_CONNECT << 16;
461 ret = fc_remote_port_chkready(rport);
463 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
464 "rport is not ready\n");
465 atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
472 if (!rp || rp->rp_state == RPORT_ST_DELETE) {
473 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
474 "rport 0x%x removed, returning DID_NO_CONNECT\n",
477 atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
478 sc->result = DID_NO_CONNECT<<16;
483 if (rp->rp_state != RPORT_ST_READY) {
484 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
485 "rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n",
486 rport->port_id, rp->rp_state);
488 sc->result = DID_IMM_RETRY << 16;
493 if (lp->state != LPORT_ST_READY || !(lp->link_up))
494 return SCSI_MLQUEUE_HOST_BUSY;
496 atomic_inc(&fnic->in_flight);
499 * Release host lock, use driver resource specific locks from here.
500 * Don't re-enable interrupts in case they were disabled prior to the
501 * caller disabling them.
503 spin_unlock(lp->host->host_lock);
504 CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED;
505 CMD_FLAGS(sc) = FNIC_NO_FLAGS;
507 /* Get a new io_req for this SCSI IO */
508 io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
510 atomic64_inc(&fnic_stats->io_stats.alloc_failures);
511 ret = SCSI_MLQUEUE_HOST_BUSY;
514 memset(io_req, 0, sizeof(*io_req));
516 /* Map the data buffer */
517 sg_count = scsi_dma_map(sc);
519 FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
520 sc->request->tag, sc, 0, sc->cmnd[0],
521 sg_count, CMD_STATE(sc));
522 mempool_free(io_req, fnic->io_req_pool);
526 /* Determine the type of scatter/gather list we need */
527 io_req->sgl_cnt = sg_count;
528 io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
529 if (sg_count > FNIC_DFLT_SG_DESC_CNT)
530 io_req->sgl_type = FNIC_SGL_CACHE_MAX;
534 mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
536 if (!io_req->sgl_list) {
537 atomic64_inc(&fnic_stats->io_stats.alloc_failures);
538 ret = SCSI_MLQUEUE_HOST_BUSY;
540 mempool_free(io_req, fnic->io_req_pool);
544 /* Cache sgl list allocated address before alignment */
545 io_req->sgl_list_alloc = io_req->sgl_list;
546 ptr = (unsigned long) io_req->sgl_list;
547 if (ptr % FNIC_SG_DESC_ALIGN) {
548 io_req->sgl_list = (struct host_sg_desc *)
549 (((unsigned long) ptr
550 + FNIC_SG_DESC_ALIGN - 1)
551 & ~(FNIC_SG_DESC_ALIGN - 1));
* Will acquire the io lock before marking the IO initialized.
559 io_lock = fnic_io_lock_hash(fnic, sc);
560 spin_lock_irqsave(io_lock, flags);
562 /* initialize rest of io_req */
563 io_lock_acquired = 1;
564 io_req->port_id = rport->port_id;
565 io_req->start_time = jiffies;
566 CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
567 CMD_SP(sc) = (char *)io_req;
568 CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED;
569 sc->scsi_done = done;
571 /* create copy wq desc and enqueue it */
572 wq = &fnic->wq_copy[0];
573 ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count);
576 * In case another thread cancelled the request,
577 * refetch the pointer under the lock.
579 FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
580 sc->request->tag, sc, 0, 0, 0,
581 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
582 io_req = (struct fnic_io_req *)CMD_SP(sc);
584 CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
585 spin_unlock_irqrestore(io_lock, flags);
587 fnic_release_ioreq_buf(fnic, io_req, sc);
588 mempool_free(io_req, fnic->io_req_pool);
590 atomic_dec(&fnic->in_flight);
591 /* acquire host lock before returning to SCSI */
592 spin_lock(lp->host->host_lock);
595 atomic64_inc(&fnic_stats->io_stats.active_ios);
596 atomic64_inc(&fnic_stats->io_stats.num_ios);
597 if (atomic64_read(&fnic_stats->io_stats.active_ios) >
598 atomic64_read(&fnic_stats->io_stats.max_active_ios))
599 atomic64_set(&fnic_stats->io_stats.max_active_ios,
600 atomic64_read(&fnic_stats->io_stats.active_ios));
602 /* REVISIT: Use per IO lock in the final code */
603 CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
606 cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
607 (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |
608 (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 |
611 FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
612 sc->request->tag, sc, io_req,
614 (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
/* only release the io lock if it was actually acquired */
617 if (io_lock_acquired)
618 spin_unlock_irqrestore(io_lock, flags);
620 atomic_dec(&fnic->in_flight);
621 /* acquire host lock before returning to SCSI */
622 spin_lock(lp->host->host_lock);
626 DEF_SCSI_QCMD(fnic_queuecommand)
629 * fnic_fcpio_fw_reset_cmpl_handler
630 * Routine to handle fw reset completion
632 static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
633 struct fcpio_fw_req *desc)
637 struct fcpio_tag tag;
640 struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
642 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
644 atomic64_inc(&reset_stats->fw_reset_completions);
646 /* Clean up all outstanding io requests */
647 fnic_cleanup_io(fnic, SCSI_NO_TAG);
649 atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
650 atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
651 atomic64_set(&fnic->io_cmpl_skip, 0);
653 spin_lock_irqsave(&fnic->fnic_lock, flags);
655 /* fnic should be in FC_TRANS_ETH_MODE */
656 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
657 /* Check status of reset completion */
659 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
660 "reset cmpl success\n");
661 /* Ready to send flogi out */
662 fnic->state = FNIC_IN_ETH_MODE;
664 FNIC_SCSI_DBG(KERN_DEBUG,
666 "fnic fw_reset : failed %s\n",
667 fnic_fcpio_status_to_str(hdr_status));
670 * Unable to change to eth mode, cannot send out flogi
671 * Change state to fc mode, so that subsequent Flogi
672 * requests from libFC will cause more attempts to
673 * reset the firmware. Free the cached flogi
675 fnic->state = FNIC_IN_FC_MODE;
676 atomic64_inc(&reset_stats->fw_reset_failures);
680 FNIC_SCSI_DBG(KERN_DEBUG,
682 "Unexpected state %s while processing"
683 " reset cmpl\n", fnic_state_to_str(fnic->state));
684 atomic64_inc(&reset_stats->fw_reset_failures);
688 /* Thread removing device blocks till firmware reset is complete */
689 if (fnic->remove_wait)
690 complete(fnic->remove_wait);
693 * If fnic is being removed, or fw reset failed
694 * free the flogi frame. Else, send it out
696 if (fnic->remove_wait || ret) {
697 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
698 skb_queue_purge(&fnic->tx_queue);
699 goto reset_cmpl_handler_end;
702 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
706 reset_cmpl_handler_end:
707 fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
713 * fnic_fcpio_flogi_reg_cmpl_handler
714 * Routine to handle flogi register completion
716 static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
717 struct fcpio_fw_req *desc)
721 struct fcpio_tag tag;
725 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
727 /* Update fnic state based on status of flogi reg completion */
728 spin_lock_irqsave(&fnic->fnic_lock, flags);
730 if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {
732 /* Check flogi registration completion status */
734 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
735 "flog reg succeeded\n");
736 fnic->state = FNIC_IN_FC_MODE;
738 FNIC_SCSI_DBG(KERN_DEBUG,
740 "fnic flogi reg :failed %s\n",
741 fnic_fcpio_status_to_str(hdr_status));
742 fnic->state = FNIC_IN_ETH_MODE;
746 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
747 "Unexpected fnic state %s while"
748 " processing flogi reg completion\n",
749 fnic_state_to_str(fnic->state));
754 if (fnic->stop_rx_link_events) {
755 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
756 goto reg_cmpl_handler_end;
758 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
761 queue_work(fnic_event_queue, &fnic->frame_work);
763 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
766 reg_cmpl_handler_end:
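/*
 * is_ack_index_in_range
 * An ACK from the firmware carries a request_out index into the copy WQ
 * ring. It is only trusted if it falls inside the window of descriptors
 * that have been posted (to_use_index) but not yet cleaned
 * (to_clean_index); the two branches below handle the non-wrapped and
 * wrapped cases of that window.
 */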
770 static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
773 if (wq->to_clean_index <= wq->to_use_index) {
774 /* out of range, stale request_out index */
775 if (request_out < wq->to_clean_index ||
776 request_out >= wq->to_use_index)
779 /* out of range, stale request_out index */
780 if (request_out < wq->to_clean_index &&
781 request_out >= wq->to_use_index)
784 /* request_out index is in range */
* Mark that an ACK was received and store the ACK index. If multiple
* ACKs arrive before the Tx thread cleans them up, the latest value is
* used, which is the correct behavior. This state really belongs in the
* copy WQ rather than in the fnic.
795 static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
796 unsigned int cq_index,
797 struct fcpio_fw_req *desc)
799 struct vnic_wq_copy *wq;
800 u16 request_out = desc->u.ack.request_out;
802 u64 *ox_id_tag = (u64 *)(void *)desc;
804 /* mark the ack state */
805 wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
806 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
808 fnic->fnic_stats.misc_stats.last_ack_time = jiffies;
809 if (is_ack_index_in_range(wq, request_out)) {
810 fnic->fw_ack_index[0] = request_out;
811 fnic->fw_ack_recd[0] = 1;
814 &fnic->fnic_stats.misc_stats.ack_index_out_of_range);
816 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
817 FNIC_TRACE(fnic_fcpio_ack_handler,
818 fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
819 ox_id_tag[4], ox_id_tag[5]);
823 * fnic_fcpio_icmnd_cmpl_handler
824 * Routine to handle icmnd completions
826 static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
827 struct fcpio_fw_req *desc)
831 struct fcpio_tag tag;
834 struct fcpio_icmnd_cmpl *icmnd_cmpl;
835 struct fnic_io_req *io_req;
836 struct scsi_cmnd *sc;
837 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
841 unsigned long start_time;
842 unsigned long io_duration_time;
844 /* Decode the cmpl description to get the io_req id */
845 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
846 fcpio_tag_id_dec(&tag, &id);
847 icmnd_cmpl = &desc->u.icmnd_cmpl;
849 if (id >= fnic->fnic_max_tag_id) {
850 shost_printk(KERN_ERR, fnic->lport->host,
851 "Tag out of range tag %x hdr status = %s\n",
852 id, fnic_fcpio_status_to_str(hdr_status));
856 sc = scsi_host_find_tag(fnic->lport->host, id);
859 atomic64_inc(&fnic_stats->io_stats.sc_null);
860 shost_printk(KERN_ERR, fnic->lport->host,
861 "icmnd_cmpl sc is null - "
862 "hdr status = %s tag = 0x%x desc = 0x%p\n",
863 fnic_fcpio_status_to_str(hdr_status), id, desc);
864 FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
865 fnic->lport->host->host_no, id,
866 ((u64)icmnd_cmpl->_resvd0[1] << 16 |
867 (u64)icmnd_cmpl->_resvd0[0]),
868 ((u64)hdr_status << 16 |
869 (u64)icmnd_cmpl->scsi_status << 8 |
870 (u64)icmnd_cmpl->flags), desc,
871 (u64)icmnd_cmpl->residual, 0);
875 io_lock = fnic_io_lock_hash(fnic, sc);
876 spin_lock_irqsave(io_lock, flags);
877 io_req = (struct fnic_io_req *)CMD_SP(sc);
878 WARN_ON_ONCE(!io_req);
880 atomic64_inc(&fnic_stats->io_stats.ioreq_null);
881 CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
882 spin_unlock_irqrestore(io_lock, flags);
883 shost_printk(KERN_ERR, fnic->lport->host,
884 "icmnd_cmpl io_req is null - "
885 "hdr status = %s tag = 0x%x sc 0x%p\n",
886 fnic_fcpio_status_to_str(hdr_status), id, sc);
889 start_time = io_req->start_time;
891 /* firmware completed the io */
892 io_req->io_completed = 1;
* If SCSI-ML has already issued an abort for this command, just mark
* the IO as completed; the abort path will clean it up.
898 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
901 * set the FNIC_IO_DONE so that this doesn't get
902 * flagged as 'out of order' if it was not aborted
904 CMD_FLAGS(sc) |= FNIC_IO_DONE;
905 CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING;
906 spin_unlock_irqrestore(io_lock, flags);
907 if(FCPIO_ABORTED == hdr_status)
908 CMD_FLAGS(sc) |= FNIC_IO_ABORTED;
910 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
911 "icmnd_cmpl abts pending "
912 "hdr status = %s tag = 0x%x sc = 0x%p"
913 "scsi_status = %x residual = %d\n",
914 fnic_fcpio_status_to_str(hdr_status),
916 icmnd_cmpl->scsi_status,
917 icmnd_cmpl->residual);
921 /* Mark the IO as complete */
922 CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
924 icmnd_cmpl = &desc->u.icmnd_cmpl;
926 switch (hdr_status) {
928 sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
929 xfer_len = scsi_bufflen(sc);
930 scsi_set_resid(sc, icmnd_cmpl->residual);
932 if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
933 xfer_len -= icmnd_cmpl->residual;
935 if (icmnd_cmpl->scsi_status == SAM_STAT_CHECK_CONDITION)
936 atomic64_inc(&fnic_stats->misc_stats.check_condition);
938 if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
939 atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
942 case FCPIO_TIMEOUT: /* request was timed out */
943 atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);
944 sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
947 case FCPIO_ABORTED: /* request was aborted */
948 atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);
949 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
952 case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
953 atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);
954 scsi_set_resid(sc, icmnd_cmpl->residual);
955 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
958 case FCPIO_OUT_OF_RESOURCE: /* out of resources to complete request */
959 atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);
960 sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
963 case FCPIO_IO_NOT_FOUND: /* requested I/O was not found */
964 atomic64_inc(&fnic_stats->io_stats.io_not_found);
965 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
968 case FCPIO_SGL_INVALID: /* request was aborted due to sgl error */
969 atomic64_inc(&fnic_stats->misc_stats.sgl_invalid);
970 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
973 case FCPIO_FW_ERR: /* request was terminated due fw error */
974 atomic64_inc(&fnic_stats->fw_stats.io_fw_errs);
975 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
978 case FCPIO_MSS_INVALID: /* request was aborted due to mss error */
979 atomic64_inc(&fnic_stats->misc_stats.mss_invalid);
980 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
983 case FCPIO_INVALID_HEADER: /* header contains invalid data */
984 case FCPIO_INVALID_PARAM: /* some parameter in request invalid */
985 case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
987 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
991 /* Break link with the SCSI command */
993 CMD_FLAGS(sc) |= FNIC_IO_DONE;
995 spin_unlock_irqrestore(io_lock, flags);
997 if (hdr_status != FCPIO_SUCCESS) {
998 atomic64_inc(&fnic_stats->io_stats.io_failures);
999 shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
1000 fnic_fcpio_status_to_str(hdr_status));
1003 fnic_release_ioreq_buf(fnic, io_req, sc);
1005 mempool_free(io_req, fnic->io_req_pool);
1007 cmd_trace = ((u64)hdr_status << 56) |
1008 (u64)icmnd_cmpl->scsi_status << 48 |
1009 (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
1010 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
1011 (u64)sc->cmnd[4] << 8 | sc->cmnd[5];
1013 FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
1014 sc->device->host->host_no, id, sc,
1015 ((u64)icmnd_cmpl->_resvd0[1] << 56 |
1016 (u64)icmnd_cmpl->_resvd0[0] << 48 |
1017 jiffies_to_msecs(jiffies - start_time)),
1019 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
1021 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
1022 fnic->lport->host_stats.fcp_input_requests++;
1023 fnic->fcp_input_bytes += xfer_len;
1024 } else if (sc->sc_data_direction == DMA_TO_DEVICE) {
1025 fnic->lport->host_stats.fcp_output_requests++;
1026 fnic->fcp_output_bytes += xfer_len;
1028 fnic->lport->host_stats.fcp_control_requests++;
1030 atomic64_dec(&fnic_stats->io_stats.active_ios);
1031 if (atomic64_read(&fnic->io_cmpl_skip))
1032 atomic64_dec(&fnic->io_cmpl_skip);
1034 atomic64_inc(&fnic_stats->io_stats.io_completions);
1037 io_duration_time = jiffies_to_msecs(jiffies) -
1038 jiffies_to_msecs(start_time);
1040 if(io_duration_time <= 10)
1041 atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec);
1042 else if(io_duration_time <= 100)
1043 atomic64_inc(&fnic_stats->io_stats.io_btw_10_to_100_msec);
1044 else if(io_duration_time <= 500)
1045 atomic64_inc(&fnic_stats->io_stats.io_btw_100_to_500_msec);
1046 else if(io_duration_time <= 5000)
1047 atomic64_inc(&fnic_stats->io_stats.io_btw_500_to_5000_msec);
1048 else if(io_duration_time <= 10000)
1049 atomic64_inc(&fnic_stats->io_stats.io_btw_5000_to_10000_msec);
1050 else if(io_duration_time <= 30000)
1051 atomic64_inc(&fnic_stats->io_stats.io_btw_10000_to_30000_msec);
1053 atomic64_inc(&fnic_stats->io_stats.io_greater_than_30000_msec);
1055 if(io_duration_time > atomic64_read(&fnic_stats->io_stats.current_max_io_time))
1056 atomic64_set(&fnic_stats->io_stats.current_max_io_time, io_duration_time);
1059 /* Call SCSI completion function to complete the IO */
1064 /* fnic_fcpio_itmf_cmpl_handler
1065 * Routine to handle itmf completions
1067 static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
1068 struct fcpio_fw_req *desc)
1072 struct fcpio_tag tag;
1074 struct scsi_cmnd *sc;
1075 struct fnic_io_req *io_req;
1076 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
1077 struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats;
1078 struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
1079 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
1080 unsigned long flags;
1081 spinlock_t *io_lock;
1082 unsigned long start_time;
1084 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
1085 fcpio_tag_id_dec(&tag, &id);
1087 if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) {
1088 shost_printk(KERN_ERR, fnic->lport->host,
1089 "Tag out of range tag %x hdr status = %s\n",
1090 id, fnic_fcpio_status_to_str(hdr_status));
1094 sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
1097 atomic64_inc(&fnic_stats->io_stats.sc_null);
1098 shost_printk(KERN_ERR, fnic->lport->host,
1099 "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
1100 fnic_fcpio_status_to_str(hdr_status), id);
1103 io_lock = fnic_io_lock_hash(fnic, sc);
1104 spin_lock_irqsave(io_lock, flags);
1105 io_req = (struct fnic_io_req *)CMD_SP(sc);
1106 WARN_ON_ONCE(!io_req);
1108 atomic64_inc(&fnic_stats->io_stats.ioreq_null);
1109 spin_unlock_irqrestore(io_lock, flags);
1110 CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
1111 shost_printk(KERN_ERR, fnic->lport->host,
1112 "itmf_cmpl io_req is null - "
1113 "hdr status = %s tag = 0x%x sc 0x%p\n",
1114 fnic_fcpio_status_to_str(hdr_status), id, sc);
1117 start_time = io_req->start_time;
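/*
 * The upper tag bits encode which task-management request this
 * completion belongs to: FNIC_TAG_ABORT and FNIC_TAG_DEV_RST both set
 * means the abort/terminate of a device reset, FNIC_TAG_ABORT alone is
 * a plain abort/terminate, and FNIC_TAG_DEV_RST alone is the LUN reset
 * itself.
 */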
1119 if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
1120 /* Abort and terminate completion of device reset req */
1121 /* REVISIT : Add asserts about various flags */
1122 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1123 "dev reset abts cmpl recd. id %x status %s\n",
1124 id, fnic_fcpio_status_to_str(hdr_status));
1125 CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
1126 CMD_ABTS_STATUS(sc) = hdr_status;
1127 CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
1128 if (io_req->abts_done)
1129 complete(io_req->abts_done);
1130 spin_unlock_irqrestore(io_lock, flags);
1131 } else if (id & FNIC_TAG_ABORT) {
1132 /* Completion of abort cmd */
1133 switch (hdr_status) {
1137 if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
1138 atomic64_inc(&abts_stats->abort_fw_timeouts);
1141 &term_stats->terminate_fw_timeouts);
1143 case FCPIO_ITMF_REJECTED:
1144 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
1145 "abort reject recd. id %d\n",
1146 (int)(id & FNIC_TAG_MASK));
1148 case FCPIO_IO_NOT_FOUND:
1149 if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
1150 atomic64_inc(&abts_stats->abort_io_not_found);
1153 &term_stats->terminate_io_not_found);
1156 if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
1157 atomic64_inc(&abts_stats->abort_failures);
1160 &term_stats->terminate_failures);
1163 if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
1164 /* This is a late completion. Ignore it */
1165 spin_unlock_irqrestore(io_lock, flags);
1169 CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
1170 CMD_ABTS_STATUS(sc) = hdr_status;
/* If the status is IO not found, treat it as success */
1173 if (hdr_status == FCPIO_IO_NOT_FOUND)
1174 CMD_ABTS_STATUS(sc) = FCPIO_SUCCESS;
1176 if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
1177 atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);
1179 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1180 "abts cmpl recd. id %d status %s\n",
1181 (int)(id & FNIC_TAG_MASK),
1182 fnic_fcpio_status_to_str(hdr_status));
* If the scsi_eh thread is blocked waiting for the abort to complete,
* signal completion to it; the IO will then be cleaned up by that
* thread. Otherwise clean it up in this context.
1189 if (io_req->abts_done) {
1190 complete(io_req->abts_done);
1191 spin_unlock_irqrestore(io_lock, flags);
1193 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1194 "abts cmpl, completing IO\n");
1196 sc->result = (DID_ERROR << 16);
1198 spin_unlock_irqrestore(io_lock, flags);
1200 fnic_release_ioreq_buf(fnic, io_req, sc);
1201 mempool_free(io_req, fnic->io_req_pool);
1202 if (sc->scsi_done) {
1203 FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
1204 sc->device->host->host_no, id,
1206 jiffies_to_msecs(jiffies - start_time),
1208 (((u64)hdr_status << 40) |
1209 (u64)sc->cmnd[0] << 32 |
1210 (u64)sc->cmnd[2] << 24 |
1211 (u64)sc->cmnd[3] << 16 |
1212 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
1213 (((u64)CMD_FLAGS(sc) << 32) |
1216 atomic64_dec(&fnic_stats->io_stats.active_ios);
1217 if (atomic64_read(&fnic->io_cmpl_skip))
1218 atomic64_dec(&fnic->io_cmpl_skip);
1220 atomic64_inc(&fnic_stats->io_stats.io_completions);
1224 } else if (id & FNIC_TAG_DEV_RST) {
1225 /* Completion of device reset */
1226 CMD_LR_STATUS(sc) = hdr_status;
1227 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1228 spin_unlock_irqrestore(io_lock, flags);
1229 CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING;
1230 FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
1231 sc->device->host->host_no, id, sc,
1232 jiffies_to_msecs(jiffies - start_time),
1234 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
1235 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1236 "Terminate pending "
1237 "dev reset cmpl recd. id %d status %s\n",
1238 (int)(id & FNIC_TAG_MASK),
1239 fnic_fcpio_status_to_str(hdr_status));
1242 if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) {
1243 /* Need to wait for terminate completion */
1244 spin_unlock_irqrestore(io_lock, flags);
1245 FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
1246 sc->device->host->host_no, id, sc,
1247 jiffies_to_msecs(jiffies - start_time),
1249 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
1250 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1251 "dev reset cmpl recd after time out. "
1252 "id %d status %s\n",
1253 (int)(id & FNIC_TAG_MASK),
1254 fnic_fcpio_status_to_str(hdr_status));
1257 CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
1258 CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
1259 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1260 "dev reset cmpl recd. id %d status %s\n",
1261 (int)(id & FNIC_TAG_MASK),
1262 fnic_fcpio_status_to_str(hdr_status));
1263 if (io_req->dr_done)
1264 complete(io_req->dr_done);
1265 spin_unlock_irqrestore(io_lock, flags);
1268 shost_printk(KERN_ERR, fnic->lport->host,
1269 "Unexpected itmf io state %s tag %x\n",
1270 fnic_ioreq_state_to_str(CMD_STATE(sc)), id);
1271 spin_unlock_irqrestore(io_lock, flags);
1277 * fnic_fcpio_cmpl_handler
1278 * Routine to service the cq for wq_copy
1280 static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
1281 unsigned int cq_index,
1282 struct fcpio_fw_req *desc)
1284 struct fnic *fnic = vnic_dev_priv(vdev);
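/*
 * Each completion type handled in the first switch corresponds to a
 * request previously queued to the firmware, so the active_fw_reqs
 * counter is dropped here before the completion is dispatched to its
 * type-specific handler below.
 */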
1286 switch (desc->hdr.type) {
1287 case FCPIO_ICMND_CMPL: /* fw completed a command */
1288 case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
1289 case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
1290 case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
1291 case FCPIO_RESET_CMPL: /* fw completed reset */
1292 atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs);
1298 switch (desc->hdr.type) {
1299 case FCPIO_ACK: /* fw copied copy wq desc to its queue */
1300 fnic_fcpio_ack_handler(fnic, cq_index, desc);
1303 case FCPIO_ICMND_CMPL: /* fw completed a command */
1304 fnic_fcpio_icmnd_cmpl_handler(fnic, desc);
1307 case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
1308 fnic_fcpio_itmf_cmpl_handler(fnic, desc);
1311 case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
1312 case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
1313 fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
1316 case FCPIO_RESET_CMPL: /* fw completed reset */
1317 fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
1321 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1322 "firmware completion type %d\n",
1331 * fnic_wq_copy_cmpl_handler
1332 * Routine to process wq copy
1334 int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
1336 unsigned int wq_work_done = 0;
1337 unsigned int i, cq_index;
1338 unsigned int cur_work_done;
1340 for (i = 0; i < fnic->wq_copy_count; i++) {
1341 cq_index = i + fnic->raw_wq_count + fnic->rq_count;
1342 cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
1343 fnic_fcpio_cmpl_handler,
1345 wq_work_done += cur_work_done;
1347 return wq_work_done;
1350 static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
1353 struct fnic_io_req *io_req;
1354 unsigned long flags = 0;
1355 struct scsi_cmnd *sc;
1356 spinlock_t *io_lock;
1357 unsigned long start_time = 0;
1358 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
1360 for (i = 0; i < fnic->fnic_max_tag_id; i++) {
1361 if (i == exclude_id)
1364 io_lock = fnic_io_lock_tag(fnic, i);
1365 spin_lock_irqsave(io_lock, flags);
1366 sc = scsi_host_find_tag(fnic->lport->host, i);
1368 spin_unlock_irqrestore(io_lock, flags);
1372 io_req = (struct fnic_io_req *)CMD_SP(sc);
1373 if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
1374 !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
1376 * We will be here only when FW completes reset
1377 * without sending completions for outstanding ios.
1379 CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
1380 if (io_req && io_req->dr_done)
1381 complete(io_req->dr_done);
1382 else if (io_req && io_req->abts_done)
1383 complete(io_req->abts_done);
1384 spin_unlock_irqrestore(io_lock, flags);
1386 } else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
1387 spin_unlock_irqrestore(io_lock, flags);
1391 spin_unlock_irqrestore(io_lock, flags);
1392 goto cleanup_scsi_cmd;
1397 spin_unlock_irqrestore(io_lock, flags);
1400 * If there is a scsi_cmnd associated with this io_req, then
1401 * free the corresponding state
1403 start_time = io_req->start_time;
1404 fnic_release_ioreq_buf(fnic, io_req, sc);
1405 mempool_free(io_req, fnic->io_req_pool);
1408 sc->result = DID_TRANSPORT_DISRUPTED << 16;
1409 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1410 "%s: sc duration = %lu DID_TRANSPORT_DISRUPTED\n",
1411 __func__, (jiffies - start_time));
1413 if (atomic64_read(&fnic->io_cmpl_skip))
1414 atomic64_dec(&fnic->io_cmpl_skip);
1416 atomic64_inc(&fnic_stats->io_stats.io_completions);
1418 /* Complete the command to SCSI */
1419 if (sc->scsi_done) {
1420 FNIC_TRACE(fnic_cleanup_io,
1421 sc->device->host->host_no, i, sc,
1422 jiffies_to_msecs(jiffies - start_time),
1423 0, ((u64)sc->cmnd[0] << 32 |
1424 (u64)sc->cmnd[2] << 24 |
1425 (u64)sc->cmnd[3] << 16 |
1426 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
1427 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
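/*
 * fnic_wq_copy_cleanup_handler
 * Per-descriptor cleanup routine for the copy WQ: complete the SCSI
 * command associated with the descriptor's tag with DID_NO_CONNECT and
 * release its io_req resources.
 */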
1434 void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
1435 struct fcpio_host_req *desc)
1438 struct fnic *fnic = vnic_dev_priv(wq->vdev);
1439 struct fnic_io_req *io_req;
1440 struct scsi_cmnd *sc;
1441 unsigned long flags;
1442 spinlock_t *io_lock;
1443 unsigned long start_time = 0;
1445 /* get the tag reference */
1446 fcpio_tag_id_dec(&desc->hdr.tag, &id);
1447 id &= FNIC_TAG_MASK;
1449 if (id >= fnic->fnic_max_tag_id)
1452 sc = scsi_host_find_tag(fnic->lport->host, id);
1456 io_lock = fnic_io_lock_hash(fnic, sc);
1457 spin_lock_irqsave(io_lock, flags);
1459 /* Get the IO context which this desc refers to */
1460 io_req = (struct fnic_io_req *)CMD_SP(sc);
1462 /* fnic interrupts are turned off by now */
1465 spin_unlock_irqrestore(io_lock, flags);
1466 goto wq_copy_cleanup_scsi_cmd;
1471 spin_unlock_irqrestore(io_lock, flags);
1473 start_time = io_req->start_time;
1474 fnic_release_ioreq_buf(fnic, io_req, sc);
1475 mempool_free(io_req, fnic->io_req_pool);
1477 wq_copy_cleanup_scsi_cmd:
1478 sc->result = DID_NO_CONNECT << 16;
1479 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
1480 " DID_NO_CONNECT\n");
1482 if (sc->scsi_done) {
1483 FNIC_TRACE(fnic_wq_copy_cleanup_handler,
1484 sc->device->host->host_no, id, sc,
1485 jiffies_to_msecs(jiffies - start_time),
1486 0, ((u64)sc->cmnd[0] << 32 |
1487 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
1488 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
1489 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
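/*
 * fnic_queue_abort_io_req
 * Routine to queue an abort/terminate (ITMF) descriptor for the given tag
 * to the fw copy WQ; fails if IOs are blocked or no descriptor is free.
 */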
1495 static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
1496 u32 task_req, u8 *fc_lun,
1497 struct fnic_io_req *io_req)
1499 struct vnic_wq_copy *wq = &fnic->wq_copy[0];
1500 struct Scsi_Host *host = fnic->lport->host;
1501 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
1502 unsigned long flags;
1504 spin_lock_irqsave(host->host_lock, flags);
1505 if (unlikely(fnic_chk_state_flags_locked(fnic,
1506 FNIC_FLAGS_IO_BLOCKED))) {
1507 spin_unlock_irqrestore(host->host_lock, flags);
1510 atomic_inc(&fnic->in_flight);
1511 spin_unlock_irqrestore(host->host_lock, flags);
1513 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
1515 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
1516 free_wq_copy_descs(fnic, wq);
1518 if (!vnic_wq_copy_desc_avail(wq)) {
1519 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
1520 atomic_dec(&fnic->in_flight);
1521 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1522 "fnic_queue_abort_io_req: failure: no descriptors\n");
1523 atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
1526 fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
1527 0, task_req, tag, fc_lun, io_req->port_id,
1528 fnic->config.ra_tov, fnic->config.ed_tov);
1530 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
1531 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
1532 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
1533 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
1534 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
1536 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
1537 atomic_dec(&fnic->in_flight);
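/*
 * fnic_rport_exch_reset
 * Walk all outstanding IOs belonging to the remote port identified by
 * port_id and issue a firmware terminate (abort) for each one that is
 * still pending.
 */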
1542 static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
1547 struct fnic_io_req *io_req;
1548 spinlock_t *io_lock;
1549 unsigned long flags;
1550 struct scsi_cmnd *sc;
1551 struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
1552 struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
1553 struct scsi_lun fc_lun;
1554 enum fnic_ioreq_state old_ioreq_state;
1556 FNIC_SCSI_DBG(KERN_DEBUG,
1558 "fnic_rport_exch_reset called portid 0x%06x\n",
1561 if (fnic->in_remove)
1564 for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
1566 io_lock = fnic_io_lock_tag(fnic, tag);
1567 spin_lock_irqsave(io_lock, flags);
1568 sc = scsi_host_find_tag(fnic->lport->host, tag);
1570 spin_unlock_irqrestore(io_lock, flags);
1574 io_req = (struct fnic_io_req *)CMD_SP(sc);
1576 if (!io_req || io_req->port_id != port_id) {
1577 spin_unlock_irqrestore(io_lock, flags);
1581 if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
1582 (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
1583 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1584 "fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
1586 spin_unlock_irqrestore(io_lock, flags);
1591 * Found IO that is still pending with firmware and
1592 * belongs to rport that went away
1594 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1595 spin_unlock_irqrestore(io_lock, flags);
1598 if (io_req->abts_done) {
1599 shost_printk(KERN_ERR, fnic->lport->host,
1600 "fnic_rport_exch_reset: io_req->abts_done is set "
1602 fnic_ioreq_state_to_str(CMD_STATE(sc)));
1605 if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
1606 shost_printk(KERN_ERR, fnic->lport->host,
1608 "IO not yet issued %p tag 0x%x flags "
1610 sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
1612 old_ioreq_state = CMD_STATE(sc);
1613 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1614 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1615 if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
1616 atomic64_inc(&reset_stats->device_reset_terminates);
1617 abt_tag = (tag | FNIC_TAG_DEV_RST);
1618 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1619 "fnic_rport_exch_reset dev rst sc 0x%p\n",
1623 BUG_ON(io_req->abts_done);
1625 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1626 "fnic_rport_reset_exch: Issuing abts\n");
1628 spin_unlock_irqrestore(io_lock, flags);
1630 /* Now queue the abort command to firmware */
1631 int_to_scsilun(sc->device->lun, &fc_lun);
1633 if (fnic_queue_abort_io_req(fnic, abt_tag,
1634 FCPIO_ITMF_ABT_TASK_TERM,
1635 fc_lun.scsi_lun, io_req)) {
1637 * Revert the cmd state back to old state, if
1638 * it hasn't changed in between. This cmd will get
1639 * aborted later by scsi_eh, or cleaned up during
1642 spin_lock_irqsave(io_lock, flags);
1643 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
1644 CMD_STATE(sc) = old_ioreq_state;
1645 spin_unlock_irqrestore(io_lock, flags);
1647 spin_lock_irqsave(io_lock, flags);
1648 if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
1649 CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
1651 CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
1652 spin_unlock_irqrestore(io_lock, flags);
1653 atomic64_inc(&term_stats->terminates);
1657 if (term_cnt > atomic64_read(&term_stats->max_terminates))
1658 atomic64_set(&term_stats->max_terminates, term_cnt);
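/*
 * fnic_terminate_rport_io
 * Terminate all IOs still outstanding on the given remote port. Like
 * fnic_rport_exch_reset(), but commands are matched by their rport
 * pointer rather than by port_id.
 */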
1662 void fnic_terminate_rport_io(struct fc_rport *rport)
1667 struct fnic_io_req *io_req;
1668 spinlock_t *io_lock;
1669 unsigned long flags;
1670 struct scsi_cmnd *sc;
1671 struct scsi_lun fc_lun;
1672 struct fc_rport_libfc_priv *rdata;
1673 struct fc_lport *lport;
1675 struct fc_rport *cmd_rport;
1676 struct reset_stats *reset_stats;
1677 struct terminate_stats *term_stats;
1678 enum fnic_ioreq_state old_ioreq_state;
1681 printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n");
1684 rdata = rport->dd_data;
1687 printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n");
1690 lport = rdata->local_port;
1693 printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n");
1696 fnic = lport_priv(lport);
1697 FNIC_SCSI_DBG(KERN_DEBUG,
1698 fnic->lport->host, "fnic_terminate_rport_io called"
1699 " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n",
1700 rport->port_name, rport->node_name, rport,
1703 if (fnic->in_remove)
1706 reset_stats = &fnic->fnic_stats.reset_stats;
1707 term_stats = &fnic->fnic_stats.term_stats;
1709 for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
1711 io_lock = fnic_io_lock_tag(fnic, tag);
1712 spin_lock_irqsave(io_lock, flags);
1713 sc = scsi_host_find_tag(fnic->lport->host, tag);
1715 spin_unlock_irqrestore(io_lock, flags);
1719 cmd_rport = starget_to_rport(scsi_target(sc->device));
1720 if (rport != cmd_rport) {
1721 spin_unlock_irqrestore(io_lock, flags);
1725 io_req = (struct fnic_io_req *)CMD_SP(sc);
1727 if (!io_req || rport != cmd_rport) {
1728 spin_unlock_irqrestore(io_lock, flags);
1732 if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
1733 (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
1734 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1735 "fnic_terminate_rport_io dev rst not pending sc 0x%p\n",
1737 spin_unlock_irqrestore(io_lock, flags);
1741 * Found IO that is still pending with firmware and
1742 * belongs to rport that went away
1744 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1745 spin_unlock_irqrestore(io_lock, flags);
1748 if (io_req->abts_done) {
1749 shost_printk(KERN_ERR, fnic->lport->host,
1750 "fnic_terminate_rport_io: io_req->abts_done is set "
1752 fnic_ioreq_state_to_str(CMD_STATE(sc)));
1754 if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
1755 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
1756 "fnic_terminate_rport_io "
1757 "IO not yet issued %p tag 0x%x flags "
1759 sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
1761 old_ioreq_state = CMD_STATE(sc);
1762 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1763 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1764 if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
1765 atomic64_inc(&reset_stats->device_reset_terminates);
1766 abt_tag = (tag | FNIC_TAG_DEV_RST);
1767 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1768 "fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
1771 BUG_ON(io_req->abts_done);
1773 FNIC_SCSI_DBG(KERN_DEBUG,
1775 "fnic_terminate_rport_io: Issuing abts\n");
1777 spin_unlock_irqrestore(io_lock, flags);
1779 /* Now queue the abort command to firmware */
1780 int_to_scsilun(sc->device->lun, &fc_lun);
1782 if (fnic_queue_abort_io_req(fnic, abt_tag,
1783 FCPIO_ITMF_ABT_TASK_TERM,
1784 fc_lun.scsi_lun, io_req)) {
1786 * Revert the cmd state back to old state, if
1787 * it hasn't changed in between. This cmd will get
1788 * aborted later by scsi_eh, or cleaned up during
1791 spin_lock_irqsave(io_lock, flags);
1792 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
1793 CMD_STATE(sc) = old_ioreq_state;
1794 spin_unlock_irqrestore(io_lock, flags);
1796 spin_lock_irqsave(io_lock, flags);
1797 if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
1798 CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
1800 CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
1801 spin_unlock_irqrestore(io_lock, flags);
1802 atomic64_inc(&term_stats->terminates);
1806 if (term_cnt > atomic64_read(&term_stats->max_terminates))
1807 atomic64_set(&term_stats->max_terminates, term_cnt);
* This function is exported to the SCSI mid-layer for sending abort commands.
* A SCSI IO is represented by an io_req in the driver; the io_req is linked
* to the SCSI command, providing the link to the ULP's IO.
1816 int fnic_abort_cmd(struct scsi_cmnd *sc)
1818 struct fc_lport *lp;
1820 struct fnic_io_req *io_req = NULL;
1821 struct fc_rport *rport;
1822 spinlock_t *io_lock;
1823 unsigned long flags;
1824 unsigned long start_time = 0;
1827 struct scsi_lun fc_lun;
1828 struct fnic_stats *fnic_stats;
1829 struct abort_stats *abts_stats;
1830 struct terminate_stats *term_stats;
1831 enum fnic_ioreq_state old_ioreq_state;
1833 unsigned long abt_issued_time;
1834 DECLARE_COMPLETION_ONSTACK(tm_done);
1836 /* Wait for rport to unblock */
1837 fc_block_scsi_eh(sc);
1839 /* Get local-port, check ready and link up */
1840 lp = shost_priv(sc->device->host);
1842 fnic = lport_priv(lp);
1843 fnic_stats = &fnic->fnic_stats;
1844 abts_stats = &fnic->fnic_stats.abts_stats;
1845 term_stats = &fnic->fnic_stats.term_stats;
1847 rport = starget_to_rport(scsi_target(sc->device));
1848 tag = sc->request->tag;
1849 FNIC_SCSI_DBG(KERN_DEBUG,
1851 "Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n",
1852 rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc));
1854 CMD_FLAGS(sc) = FNIC_NO_FLAGS;
1856 if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
1858 goto fnic_abort_cmd_end;
1862 * Avoid a race between SCSI issuing the abort and the device
1863 * completing the command.
1865 * If the command is already completed by the fw cmpl code,
1866 * we just return SUCCESS from here. This means that the abort
1867 * succeeded. In the SCSI ML, since the timeout for command has
* happened, the completion won't actually complete the command
1869 * and it will be considered as an aborted command
1871 * The CMD_SP will not be cleared except while holding io_req_lock.
1873 io_lock = fnic_io_lock_hash(fnic, sc);
1874 spin_lock_irqsave(io_lock, flags);
1875 io_req = (struct fnic_io_req *)CMD_SP(sc);
1877 spin_unlock_irqrestore(io_lock, flags);
1878 goto fnic_abort_cmd_end;
1881 io_req->abts_done = &tm_done;
1883 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1884 spin_unlock_irqrestore(io_lock, flags);
1888 abt_issued_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time);
1889 if (abt_issued_time <= 6000)
1890 atomic64_inc(&abts_stats->abort_issued_btw_0_to_6_sec);
1891 else if (abt_issued_time > 6000 && abt_issued_time <= 20000)
1892 atomic64_inc(&abts_stats->abort_issued_btw_6_to_20_sec);
1893 else if (abt_issued_time > 20000 && abt_issued_time <= 30000)
1894 atomic64_inc(&abts_stats->abort_issued_btw_20_to_30_sec);
1895 else if (abt_issued_time > 30000 && abt_issued_time <= 40000)
1896 atomic64_inc(&abts_stats->abort_issued_btw_30_to_40_sec);
1897 else if (abt_issued_time > 40000 && abt_issued_time <= 50000)
1898 atomic64_inc(&abts_stats->abort_issued_btw_40_to_50_sec);
1899 else if (abt_issued_time > 50000 && abt_issued_time <= 60000)
1900 atomic64_inc(&abts_stats->abort_issued_btw_50_to_60_sec);
1902 atomic64_inc(&abts_stats->abort_issued_greater_than_60_sec);
1904 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
1905 "CBD Opcode: %02x Abort issued time: %lu msec\n", sc->cmnd[0], abt_issued_time);
* The command is still pending and needs to be aborted. If the firmware
* completes the command after this point, the completion will not be
* propagated to the mid-layer, since the abort has already started.
1912 old_ioreq_state = CMD_STATE(sc);
1913 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1914 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1916 spin_unlock_irqrestore(io_lock, flags);
1919 * Check readiness of the remote port. If the path to remote
1920 * port is up, then send abts to the remote port to terminate
1921 * the IO. Else, just locally terminate the IO in the firmware
1923 if (fc_remote_port_chkready(rport) == 0)
1924 task_req = FCPIO_ITMF_ABT_TASK;
1926 atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
1927 task_req = FCPIO_ITMF_ABT_TASK_TERM;
1930 /* Now queue the abort command to firmware */
1931 int_to_scsilun(sc->device->lun, &fc_lun);
1933 if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
1934 fc_lun.scsi_lun, io_req)) {
1935 spin_lock_irqsave(io_lock, flags);
1936 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
1937 CMD_STATE(sc) = old_ioreq_state;
1938 io_req = (struct fnic_io_req *)CMD_SP(sc);
1940 io_req->abts_done = NULL;
1941 spin_unlock_irqrestore(io_lock, flags);
1943 goto fnic_abort_cmd_end;
1945 if (task_req == FCPIO_ITMF_ABT_TASK) {
1946 CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
1947 atomic64_inc(&fnic_stats->abts_stats.aborts);
1949 CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
1950 atomic64_inc(&fnic_stats->term_stats.terminates);
1954 * We queued an abort IO, wait for its completion.
1955 * Once the firmware completes the abort command, it will
1956 * wake up this thread.
1959 wait_for_completion_timeout(&tm_done,
1961 (2 * fnic->config.ra_tov +
1962 fnic->config.ed_tov));
1964 /* Check the abort status */
1965 spin_lock_irqsave(io_lock, flags);
1967 io_req = (struct fnic_io_req *)CMD_SP(sc);
1969 atomic64_inc(&fnic_stats->io_stats.ioreq_null);
1970 spin_unlock_irqrestore(io_lock, flags);
1971 CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
1973 goto fnic_abort_cmd_end;
1975 io_req->abts_done = NULL;
1977 /* fw did not complete abort, timed out */
1978 if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
1979 spin_unlock_irqrestore(io_lock, flags);
1980 if (task_req == FCPIO_ITMF_ABT_TASK) {
1981 atomic64_inc(&abts_stats->abort_drv_timeouts);
1983 atomic64_inc(&term_stats->terminate_drv_timeouts);
1985 CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
1987 goto fnic_abort_cmd_end;
1990 /* IO out of order */
1992 if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
1993 spin_unlock_irqrestore(io_lock, flags);
1994 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1995 "Issuing Host reset due to out of order IO\n");
1998 goto fnic_abort_cmd_end;
2001 CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
2003 start_time = io_req->start_time;
2005 * firmware completed the abort, check the status,
* free the io_req if successful. If the abort fails,
* a device reset will clean up the I/O.
2009 if (CMD_ABTS_STATUS(sc) == FCPIO_SUCCESS)
2013 spin_unlock_irqrestore(io_lock, flags);
2014 goto fnic_abort_cmd_end;
2017 spin_unlock_irqrestore(io_lock, flags);
2019 fnic_release_ioreq_buf(fnic, io_req, sc);
2020 mempool_free(io_req, fnic->io_req_pool);
2022 if (sc->scsi_done) {
2023 /* Call SCSI completion function to complete the IO */
2024 sc->result = (DID_ABORT << 16);
2026 atomic64_dec(&fnic_stats->io_stats.active_ios);
2027 if (atomic64_read(&fnic->io_cmpl_skip))
2028 atomic64_dec(&fnic->io_cmpl_skip);
2030 atomic64_inc(&fnic_stats->io_stats.io_completions);
2034 FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no,
2035 sc->request->tag, sc,
2036 jiffies_to_msecs(jiffies - start_time),
2037 0, ((u64)sc->cmnd[0] << 32 |
2038 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
2039 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
2040 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
2042 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2043 "Returning from abort cmd type %x %s\n", task_req,
2045 "SUCCESS" : "FAILED");
2049 static inline int fnic_queue_dr_io_req(struct fnic *fnic,
2050 struct scsi_cmnd *sc,
2051 struct fnic_io_req *io_req)
2053 struct vnic_wq_copy *wq = &fnic->wq_copy[0];
2054 struct Scsi_Host *host = fnic->lport->host;
2055 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
2056 struct scsi_lun fc_lun;
2058 unsigned long intr_flags;
2060 spin_lock_irqsave(host->host_lock, intr_flags);
2061 if (unlikely(fnic_chk_state_flags_locked(fnic,
2062 FNIC_FLAGS_IO_BLOCKED))) {
2063 spin_unlock_irqrestore(host->host_lock, intr_flags);
2066 atomic_inc(&fnic->in_flight);
2067 spin_unlock_irqrestore(host->host_lock, intr_flags);
2069 spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
2071 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
2072 free_wq_copy_descs(fnic, wq);
2074 if (!vnic_wq_copy_desc_avail(wq)) {
2075 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2076 "queue_dr_io_req failure - no descriptors\n");
2077 atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
2082 /* fill in the lun info */
2083 int_to_scsilun(sc->device->lun, &fc_lun);
2085 fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST,
2086 0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
2087 fc_lun.scsi_lun, io_req->port_id,
2088 fnic->config.ra_tov, fnic->config.ed_tov);
2090 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
2091 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
2092 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
2093 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
2094 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
2097 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
2098 atomic_dec(&fnic->in_flight);
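/*
 * Illustrative sketch (not part of the driver source): the guard pattern
 * used at the top of fnic_queue_dr_io_req(). Submissions are refused while
 * FNIC_FLAGS_IO_BLOCKED is set, and fnic->in_flight is raised for the
 * duration of the enqueue so reset paths can see an in-progress submission.
 */
#if 0
	spin_lock_irqsave(host->host_lock, intr_flags);
	if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) {
		spin_unlock_irqrestore(host->host_lock, intr_flags);
		return FAILED;				/* IO path is blocked */
	}
	atomic_inc(&fnic->in_flight);			/* submission in progress */
	spin_unlock_irqrestore(host->host_lock, intr_flags);

	/* ... fill and post the ITMF descriptor under wq_copy_lock ... */

	atomic_dec(&fnic->in_flight);			/* submission finished */
#endif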
2104 * Clean up any pending aborts on the LUN.
2105 * For each outstanding IO on this LUN whose abort has not been completed
2106 * by the firmware, issue a local abort and wait for it to complete.
2107 * Return 0 if all commands were aborted successfully, 1 otherwise.
2109 static int fnic_clean_pending_aborts(struct fnic *fnic,
2110 struct scsi_cmnd *lr_sc,
2115 struct fnic_io_req *io_req;
2116 spinlock_t *io_lock;
2117 unsigned long flags;
2119 struct scsi_cmnd *sc;
2120 struct scsi_lun fc_lun;
2121 struct scsi_device *lun_dev = lr_sc->device;
2122 DECLARE_COMPLETION_ONSTACK(tm_done);
2123 enum fnic_ioreq_state old_ioreq_state;
2125 for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
2126 io_lock = fnic_io_lock_tag(fnic, tag);
2127 spin_lock_irqsave(io_lock, flags);
2128 sc = scsi_host_find_tag(fnic->lport->host, tag);
2130 * Ignore the LUN reset cmd itself (when it was issued on a new SC)
2131 * and any cmds that do not belong to this LUN.
2133 if (!sc || ((sc == lr_sc) && new_sc) || sc->device != lun_dev) {
2134 spin_unlock_irqrestore(io_lock, flags);
2138 io_req = (struct fnic_io_req *)CMD_SP(sc);
2140 if (!io_req || sc->device != lun_dev) {
2141 spin_unlock_irqrestore(io_lock, flags);
2146 * Found IO that is still pending with firmware and
2147 * belongs to the LUN that we are resetting
2149 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2150 "Found IO in %s on lun\n",
2151 fnic_ioreq_state_to_str(CMD_STATE(sc)));
2153 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
2154 spin_unlock_irqrestore(io_lock, flags);
2157 if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
2158 (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
2159 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
2160 "%s dev rst not pending sc 0x%p\n", __func__,
2162 spin_unlock_irqrestore(io_lock, flags);
2166 if (io_req->abts_done)
2167 shost_printk(KERN_ERR, fnic->lport->host,
2168 "%s: io_req->abts_done is set state is %s\n",
2169 __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
2170 old_ioreq_state = CMD_STATE(sc);
2172 * Any pending IO issued prior to the reset is expected to be
2173 * in the abts-pending state; if it is not, set
2174 * FNIC_IOREQ_ABTS_PENDING to mark the IO as abort-pending.
2175 * When the IO completes, it will be handed over to and
2176 * handled in this function.
2178 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
2180 BUG_ON(io_req->abts_done);
2183 if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
2184 abt_tag |= FNIC_TAG_DEV_RST;
2185 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
2186 "%s: dev rst sc 0x%p\n", __func__, sc);
2189 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
2190 io_req->abts_done = &tm_done;
2191 spin_unlock_irqrestore(io_lock, flags);
2193 /* Now queue the abort command to firmware */
2194 int_to_scsilun(sc->device->lun, &fc_lun);
2196 if (fnic_queue_abort_io_req(fnic, abt_tag,
2197 FCPIO_ITMF_ABT_TASK_TERM,
2198 fc_lun.scsi_lun, io_req)) {
2199 spin_lock_irqsave(io_lock, flags);
2200 io_req = (struct fnic_io_req *)CMD_SP(sc);
2202 io_req->abts_done = NULL;
2203 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
2204 CMD_STATE(sc) = old_ioreq_state;
2205 spin_unlock_irqrestore(io_lock, flags);
2207 goto clean_pending_aborts_end;
2209 spin_lock_irqsave(io_lock, flags);
2210 if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
2211 CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
2212 spin_unlock_irqrestore(io_lock, flags);
2214 CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
2216 wait_for_completion_timeout(&tm_done,
2218 (fnic->config.ed_tov));
2220 /* Recheck cmd state to see whether it has now been aborted */
2221 spin_lock_irqsave(io_lock, flags);
2222 io_req = (struct fnic_io_req *)CMD_SP(sc);
2224 spin_unlock_irqrestore(io_lock, flags);
2225 CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
2229 io_req->abts_done = NULL;
2231 /* if abort is still pending with fw, fail */
2232 if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
2233 spin_unlock_irqrestore(io_lock, flags);
2234 CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
2236 goto clean_pending_aborts_end;
2238 CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
2240 /* original sc used for lr is handled by dev reset code */
2243 spin_unlock_irqrestore(io_lock, flags);
2245 /* original sc used for lr is handled by dev reset code */
2247 fnic_release_ioreq_buf(fnic, io_req, sc);
2248 mempool_free(io_req, fnic->io_req_pool);
2252 * Any IO returned during the reset needs scsi_done called on it
2253 * to return the scsi_cmnd to the upper layer.
2255 if (sc->scsi_done) {
2256 /* Set result to let upper SCSI layer retry */
2257 sc->result = DID_RESET << 16;
2262 schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
2264 /* walk the tag map again to check whether IOs are still pending in fw */
2265 if (fnic_is_abts_pending(fnic, lr_sc))
2268 clean_pending_aborts_end:
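/*
 * Illustrative sketch (not part of the driver source): condensed shape of
 * the tag walk performed by fnic_clean_pending_aborts(). Locking, the
 * device-reset special cases and the statistics updates are omitted.
 */
#if 0
	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
		sc = scsi_host_find_tag(fnic->lport->host, tag);
		if (!sc || sc == lr_sc || sc->device != lun_dev)
			continue;			/* not an IO we must clean */

		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if (!io_req)
			continue;			/* nothing outstanding */

		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
		io_req->abts_done = &tm_done;

		int_to_scsilun(sc->device->lun, &fc_lun);
		if (fnic_queue_abort_io_req(fnic, tag, FCPIO_ITMF_ABT_TASK_TERM,
					    fc_lun.scsi_lun, io_req))
			return 1;			/* could not queue terminate */

		wait_for_completion_timeout(&tm_done,
					    msecs_to_jiffies(fnic->config.ed_tov));
		if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE)
			return 1;			/* terminate still pending in fw */
	}
#endif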
2273 * fnic_scsi_host_start_tag
2274 * Allocates a tag from the host's tag list.
2277 fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
2279 struct blk_queue_tag *bqt = fnic->lport->host->bqt;
2280 int tag, ret = SCSI_NO_TAG;
2284 pr_err("Tags are not supported\n");
2289 tag = find_next_zero_bit(bqt->tag_map, bqt->max_depth, 1);
2290 if (tag >= bqt->max_depth) {
2291 pr_err("Tag allocation failure\n");
2294 } while (test_and_set_bit(tag, bqt->tag_map));
2296 bqt->tag_index[tag] = sc->request;
2297 sc->request->tag = tag;
2299 if (!sc->request->special)
2300 sc->request->special = sc;
2309 * fnic_scsi_host_end_tag
2310 * Frees the tag allocated by fnic_scsi_host_start_tag.
2313 fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
2315 struct blk_queue_tag *bqt = fnic->lport->host->bqt;
2316 int tag = sc->request->tag;
2318 if (tag == SCSI_NO_TAG)
2321 BUG_ON(!bqt || !bqt->tag_index[tag]);
2325 bqt->tag_index[tag] = NULL;
2326 clear_bit(tag, bqt->tag_map);
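/*
 * Illustrative sketch (not part of the driver source): the bitmap protocol
 * shared by the two tag helpers above. Allocation races are resolved with
 * test_and_set_bit(), so no lock is required, and release simply drops the
 * request pointer before clearing the bit.
 */
#if 0
	/* allocate: find a free bit and claim it atomically */
	do {
		tag = find_next_zero_bit(bqt->tag_map, bqt->max_depth, 1);
		if (tag >= bqt->max_depth)
			return SCSI_NO_TAG;		/* tag space exhausted */
	} while (test_and_set_bit(tag, bqt->tag_map));

	/* release: forget the request, then free the bit */
	bqt->tag_index[tag] = NULL;
	clear_bit(tag, bqt->tag_map);
#endif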
2332 * The SCSI EH thread issues a LUN reset when one or more commands on a LUN
2333 * fail to get aborted. It calls the driver's eh_device_reset with a SCSI command
2336 int fnic_device_reset(struct scsi_cmnd *sc)
2338 struct fc_lport *lp;
2340 struct fnic_io_req *io_req = NULL;
2341 struct fc_rport *rport;
2344 spinlock_t *io_lock;
2345 unsigned long flags;
2346 unsigned long start_time = 0;
2347 struct scsi_lun fc_lun;
2348 struct fnic_stats *fnic_stats;
2349 struct reset_stats *reset_stats;
2351 DECLARE_COMPLETION_ONSTACK(tm_done);
2352 int tag_gen_flag = 0; /* to track tags allocated by the fnic driver */
2355 /* Wait for rport to unblock */
2356 fc_block_scsi_eh(sc);
2358 /* Get local-port, check ready and link up */
2359 lp = shost_priv(sc->device->host);
2361 fnic = lport_priv(lp);
2362 fnic_stats = &fnic->fnic_stats;
2363 reset_stats = &fnic->fnic_stats.reset_stats;
2365 atomic64_inc(&reset_stats->device_resets);
2367 rport = starget_to_rport(scsi_target(sc->device));
2368 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2369 "Device reset called FCID 0x%x, LUN 0x%llx sc 0x%p\n",
2370 rport->port_id, sc->device->lun, sc);
2372 if (lp->state != LPORT_ST_READY || !(lp->link_up))
2373 goto fnic_device_reset_end;
2375 /* Check if remote port up */
2376 if (fc_remote_port_chkready(rport)) {
2377 atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
2378 goto fnic_device_reset_end;
2381 CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
2382 /* Allocate tag if not present */
2384 tag = sc->request->tag;
2385 if (unlikely(tag < 0)) {
2387 * XXX(hch): currently the midlayer fakes up a struct
2388 * request for the explicit reset ioctls, and those
2389 * don't have a tag allocated to them. The below
2390 * code pokes into midlayer structures to paper over
2391 * this design issue, but that won't work for blk-mq.
2393 * Either someone who can actually test the hardware
2394 * will have to come up with a similar hack for the
2395 * blk-mq case, or we'll have to bite the bullet and
2396 * fix the way the EH ioctls work for real, but until
2397 * that happens we fail these explicit requests here.
2400 tag = fnic_scsi_host_start_tag(fnic, sc);
2401 if (unlikely(tag == SCSI_NO_TAG))
2402 goto fnic_device_reset_end;
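/*
 * Illustrative sketch (not part of the driver source): how the driver-
 * allocated tag is tracked. tag_gen_flag records that the tag came from
 * fnic_scsi_host_start_tag(), so the exit path can release it again with
 * fnic_scsi_host_end_tag().
 */
#if 0
	if (unlikely(tag < 0)) {
		tag_gen_flag = 1;			/* driver owns this tag */
		tag = fnic_scsi_host_start_tag(fnic, sc);
		if (unlikely(tag == SCSI_NO_TAG))
			goto fnic_device_reset_end;
	}

	/* ... device reset work ... */

	if (unlikely(tag_gen_flag))
		fnic_scsi_host_end_tag(fnic, sc);	/* give the tag back */
#endif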
2406 io_lock = fnic_io_lock_hash(fnic, sc);
2407 spin_lock_irqsave(io_lock, flags);
2408 io_req = (struct fnic_io_req *)CMD_SP(sc);
2411 * If there is an io_req attached to this command, then use it;
2412 * otherwise allocate a new one.
2415 io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
2417 spin_unlock_irqrestore(io_lock, flags);
2418 goto fnic_device_reset_end;
2420 memset(io_req, 0, sizeof(*io_req));
2421 io_req->port_id = rport->port_id;
2422 CMD_SP(sc) = (char *)io_req;
2424 io_req->dr_done = &tm_done;
2425 CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
2426 CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
2427 spin_unlock_irqrestore(io_lock, flags);
2429 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);
2432 * Issue the device reset; if the enqueue fails, clean up the io_req
2433 * and break its association with the scsi cmd.
2435 if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
2436 spin_lock_irqsave(io_lock, flags);
2437 io_req = (struct fnic_io_req *)CMD_SP(sc);
2439 io_req->dr_done = NULL;
2440 goto fnic_device_reset_clean;
2442 spin_lock_irqsave(io_lock, flags);
2443 CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED;
2444 spin_unlock_irqrestore(io_lock, flags);
2447 * Wait on the local completion for LUN reset. The io_req may be
2448 * freed while we wait since we hold no lock.
2450 wait_for_completion_timeout(&tm_done,
2451 msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
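/*
 * Illustrative sketch (not part of the driver source): because the wait
 * above sleeps without holding io_lock, the io_req must be revalidated
 * from CMD_SP() before it is touched again, as done just below.
 */
#if 0
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (!io_req) {
		/* the completion path already tore this request down */
		spin_unlock_irqrestore(io_lock, flags);
		goto fnic_device_reset_end;
	}
	io_req->dr_done = NULL;			/* no longer waiting */
	status = CMD_LR_STATUS(sc);		/* LUN reset status from firmware */
#endif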
2453 spin_lock_irqsave(io_lock, flags);
2454 io_req = (struct fnic_io_req *)CMD_SP(sc);
2456 spin_unlock_irqrestore(io_lock, flags);
2457 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2458 "io_req is null tag 0x%x sc 0x%p\n", tag, sc);
2459 goto fnic_device_reset_end;
2461 io_req->dr_done = NULL;
2463 status = CMD_LR_STATUS(sc);
2466 * If the LUN reset did not complete, bail out with FAILED; the io_req
2467 * gets cleaned up during higher levels of EH.
2469 if (status == FCPIO_INVALID_CODE) {
2470 atomic64_inc(&reset_stats->device_reset_timeouts);
2471 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2472 "Device reset timed out\n");
2473 CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
2474 spin_unlock_irqrestore(io_lock, flags);
2475 int_to_scsilun(sc->device->lun, &fc_lun);
2477 * Issue an abort/terminate for the device reset request.
2478 * If queuing the terminate fails, retry it after a delay.
2481 spin_lock_irqsave(io_lock, flags);
2482 if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) {
2483 spin_unlock_irqrestore(io_lock, flags);
2486 spin_unlock_irqrestore(io_lock, flags);
2487 if (fnic_queue_abort_io_req(fnic,
2488 tag | FNIC_TAG_DEV_RST,
2489 FCPIO_ITMF_ABT_TASK_TERM,
2490 fc_lun.scsi_lun, io_req)) {
2491 wait_for_completion_timeout(&tm_done,
2492 msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
2494 spin_lock_irqsave(io_lock, flags);
2495 CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
2496 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
2497 io_req->abts_done = &tm_done;
2498 spin_unlock_irqrestore(io_lock, flags);
2499 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2500 "Abort and terminate issued on Device reset "
2501 "tag 0x%x sc 0x%p\n", tag, sc);
2506 spin_lock_irqsave(io_lock, flags);
2507 if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
2508 spin_unlock_irqrestore(io_lock, flags);
2509 wait_for_completion_timeout(&tm_done,
2510 msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
2513 io_req = (struct fnic_io_req *)CMD_SP(sc);
2514 io_req->abts_done = NULL;
2515 goto fnic_device_reset_clean;
2519 spin_unlock_irqrestore(io_lock, flags);
2522 /* Completed but not successful: clean up the io_req and return failure */
2523 if (status != FCPIO_SUCCESS) {
2524 spin_lock_irqsave(io_lock, flags);
2525 FNIC_SCSI_DBG(KERN_DEBUG,
2527 "Device reset completed - failed\n");
2528 io_req = (struct fnic_io_req *)CMD_SP(sc);
2529 goto fnic_device_reset_clean;
2533 * Clean up any aborts on this LUN that have still not
2534 * completed. If any of these fail, then the LUN reset fails.
2535 * clean_pending_aborts cleans all cmds on this LUN except
2536 * the LUN reset cmd. If all cmds get cleaned, the LUN reset succeeds.
2539 if (fnic_clean_pending_aborts(fnic, sc, new_sc)) {
2540 spin_lock_irqsave(io_lock, flags);
2541 io_req = (struct fnic_io_req *)CMD_SP(sc);
2542 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2543 "Device reset failed"
2544 " since could not abort all IOs\n");
2545 goto fnic_device_reset_clean;
2548 /* Clean lun reset command */
2549 spin_lock_irqsave(io_lock, flags);
2550 io_req = (struct fnic_io_req *)CMD_SP(sc);
2552 /* Completed, and successful */
2555 fnic_device_reset_clean:
2559 spin_unlock_irqrestore(io_lock, flags);
2562 start_time = io_req->start_time;
2563 fnic_release_ioreq_buf(fnic, io_req, sc);
2564 mempool_free(io_req, fnic->io_req_pool);
2567 fnic_device_reset_end:
2568 FNIC_TRACE(fnic_device_reset, sc->device->host->host_no,
2569 sc->request->tag, sc,
2570 jiffies_to_msecs(jiffies - start_time),
2571 0, ((u64)sc->cmnd[0] << 32 |
2572 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
2573 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
2574 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
2576 /* free tag if it is allocated */
2577 if (unlikely(tag_gen_flag))
2578 fnic_scsi_host_end_tag(fnic, sc);
2580 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2581 "Returning from device reset %s\n",
2583 "SUCCESS" : "FAILED");
2586 atomic64_inc(&reset_stats->device_reset_failures);
2591 /* Clean up all IOs, clean up libFC local port */
2592 int fnic_reset(struct Scsi_Host *shost)
2594 struct fc_lport *lp;
2597 struct reset_stats *reset_stats;
2599 lp = shost_priv(shost);
2600 fnic = lport_priv(lp);
2601 reset_stats = &fnic->fnic_stats.reset_stats;
2603 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2604 "fnic_reset called\n");
2606 atomic64_inc(&reset_stats->fnic_resets);
2609 * Reset the local port; this will clean up libFC exchanges,
2610 * reset remote port sessions and, if the link is up, begin FLOGI.
2612 ret = fc_lport_reset(lp);
2614 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2615 "Returning from fnic reset %s\n",
2617 "SUCCESS" : "FAILED");
2620 atomic64_inc(&reset_stats->fnic_reset_completions);
2622 atomic64_inc(&reset_stats->fnic_reset_failures);
2628 * SCSI error handling calls the driver's eh_host_reset if all prior
2629 * error handling levels return FAILED. If the host reset completes
2630 * successfully, and the link is up, then fabric login begins.
2632 * Host reset is the highest level of error recovery. If this fails, the
2633 * host is offlined by SCSI.
2636 int fnic_host_reset(struct scsi_cmnd *sc)
2639 unsigned long wait_host_tmo;
2640 struct Scsi_Host *shost = sc->device->host;
2641 struct fc_lport *lp = shost_priv(shost);
2642 struct fnic *fnic = lport_priv(lp);
2643 unsigned long flags;
2645 spin_lock_irqsave(&fnic->fnic_lock, flags);
2646 if (fnic->internal_reset_inprogress == 0) {
2647 fnic->internal_reset_inprogress = 1;
2649 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2650 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2651 "host reset in progress skipping another host reset\n");
2654 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2657 * If fnic_reset is successful, wait for the fabric login to complete.
2658 * scsi-ml sends a TUR to every device if the host reset is
2659 * successful, so the fabric should be up before we return to SCSI.
2661 ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED;
2662 if (ret == SUCCESS) {
2663 wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
2665 while (time_before(jiffies, wait_host_tmo)) {
2666 if ((lp->state == LPORT_ST_READY) &&
2675 spin_lock_irqsave(&fnic->fnic_lock, flags);
2676 fnic->internal_reset_inprogress = 0;
2677 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
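/*
 * Illustrative sketch (not part of the driver source): the settle-time wait
 * performed after a successful fnic_reset(). The handler polls the lport
 * for up to FNIC_HOST_RESET_SETTLE_TIME seconds so the fabric login can
 * finish before scsi-ml starts sending TURs to the devices.
 */
#if 0
	wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
	ret = FAILED;
	while (time_before(jiffies, wait_host_tmo)) {
		if (lp->state == LPORT_ST_READY && lp->link_up) {
			ret = SUCCESS;			/* fabric is back up */
			break;
		}
		ssleep(1);				/* poll once per second */
	}
#endif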
2682 * This function is called from libFC when the host is removed.
2684 void fnic_scsi_abort_io(struct fc_lport *lp)
2687 unsigned long flags;
2688 enum fnic_state old_state;
2689 struct fnic *fnic = lport_priv(lp);
2690 DECLARE_COMPLETION_ONSTACK(remove_wait);
2692 /* Issue firmware reset for fnic, wait for reset to complete */
2694 spin_lock_irqsave(&fnic->fnic_lock, flags);
2695 if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
2696 /* fw reset is in progress, poll for its completion */
2697 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2698 schedule_timeout(msecs_to_jiffies(100));
2699 goto retry_fw_reset;
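/*
 * Illustrative sketch (not part of the driver source): the poll-and-retry
 * pattern used above. While a previous firmware reset is still in flight
 * the state stays FNIC_IN_FC_TRANS_ETH_MODE, so the caller drops fnic_lock,
 * sleeps briefly (the driver's existing 100 ms schedule_timeout() call),
 * and re-checks until the transition has completed.
 */
#if 0
retry_fw_reset:
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
		/* firmware reset still in progress: poll for its completion */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		schedule_timeout(msecs_to_jiffies(100));
		goto retry_fw_reset;
	}
#endif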
2702 fnic->remove_wait = &remove_wait;
2703 old_state = fnic->state;
2704 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
2705 fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
2706 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2708 err = fnic_fw_reset_handler(fnic);
2710 spin_lock_irqsave(&fnic->fnic_lock, flags);
2711 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
2712 fnic->state = old_state;
2713 fnic->remove_wait = NULL;
2714 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2718 /* Wait for firmware reset to complete */
2719 wait_for_completion_timeout(&remove_wait,
2720 msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));
2722 spin_lock_irqsave(&fnic->fnic_lock, flags);
2723 fnic->remove_wait = NULL;
2724 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2725 "fnic_scsi_abort_io %s\n",
2726 (fnic->state == FNIC_IN_ETH_MODE) ?
2727 "SUCCESS" : "FAILED");
2728 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2733 * This function is called from libFC to clean up driver IO state on link down.
2735 void fnic_scsi_cleanup(struct fc_lport *lp)
2737 unsigned long flags;
2738 enum fnic_state old_state;
2739 struct fnic *fnic = lport_priv(lp);
2741 /* issue fw reset */
2743 spin_lock_irqsave(&fnic->fnic_lock, flags);
2744 if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
2745 /* fw reset is in progress, poll for its completion */
2746 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2747 schedule_timeout(msecs_to_jiffies(100));
2748 goto retry_fw_reset;
2750 old_state = fnic->state;
2751 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
2752 fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
2753 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2755 if (fnic_fw_reset_handler(fnic)) {
2756 spin_lock_irqsave(&fnic->fnic_lock, flags);
2757 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
2758 fnic->state = old_state;
2759 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2764 void fnic_empty_scsi_cleanup(struct fc_lport *lp)
2768 void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
2770 struct fnic *fnic = lport_priv(lp);
2772 /* Non-zero sid, nothing to do */
2774 goto call_fc_exch_mgr_reset;
2777 fnic_rport_exch_reset(fnic, did);
2778 goto call_fc_exch_mgr_reset;
2783 * link down or device being removed
2785 if (!fnic->in_remove)
2786 fnic_scsi_cleanup(lp);
2788 fnic_scsi_abort_io(lp);
2790 /* call libFC exch mgr reset to reset its exchanges */
2791 call_fc_exch_mgr_reset:
2792 fc_exch_mgr_reset(lp, sid, did);
2797 * fnic_is_abts_pending() is a helper function that
2798 * walks through the tag map to check whether any IOs are still pending;
2799 * if one is found it returns 1 (true), otherwise 0 (false).
2800 * If @lr_sc is non-NULL, only IOs on that particular LUN are checked;
2801 * otherwise, all IOs are checked.
2803 int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
2806 struct fnic_io_req *io_req;
2807 spinlock_t *io_lock;
2808 unsigned long flags;
2810 struct scsi_cmnd *sc;
2811 struct scsi_device *lun_dev = NULL;
2814 lun_dev = lr_sc->device;
2816 /* walk the tag map again to check whether IOs are still pending in fw */
2817 for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
2818 sc = scsi_host_find_tag(fnic->lport->host, tag);
2820 * ignore this LUN reset cmd or cmds that do not belong to this LUN
2823 if (!sc || (lr_sc && (sc->device != lun_dev || sc == lr_sc)))
2826 io_lock = fnic_io_lock_hash(fnic, sc);
2827 spin_lock_irqsave(io_lock, flags);
2829 io_req = (struct fnic_io_req *)CMD_SP(sc);
2831 if (!io_req || sc->device != lun_dev) {
2832 spin_unlock_irqrestore(io_lock, flags);
2837 * Found IO that is still pending with firmware and
2838 * belongs to the LUN that we are resetting
2840 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
2841 "Found IO in %s on lun\n",
2842 fnic_ioreq_state_to_str(CMD_STATE(sc)));
2844 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
2846 spin_unlock_irqrestore(io_lock, flags);
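/*
 * Illustrative sketch (not part of the driver source): condensed form of
 * the scan performed by fnic_is_abts_pending(). It reports whether any
 * command, optionally restricted to @lr_sc's LUN, is still waiting on an
 * abort from the firmware. Locking is omitted.
 */
#if 0
	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
		sc = scsi_host_find_tag(fnic->lport->host, tag);
		if (!sc || sc == lr_sc || (lun_dev && sc->device != lun_dev))
			continue;
		if (CMD_SP(sc) && CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
			return 1;		/* at least one abort still pending */
	}
	return 0;
#endif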