/*
 *  QLogic FCoE Offload Driver
 *  Copyright (c) 2016-2018 Cavium Inc.
 *
 *  This software is available under the terms of the GNU General Public License
 *  (GPL) Version 2, available from the file COPYING in the main directory of
 *  this source tree.
 */
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "qedf.h"
#include <scsi/scsi_tcq.h>

void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
        unsigned int timer_msec)
{
        queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
            msecs_to_jiffies(timer_msec));
}

static void qedf_cmd_timeout(struct work_struct *work)
{
        struct qedf_ioreq *io_req =
            container_of(work, struct qedf_ioreq, timeout_work.work);
        struct qedf_ctx *qedf;
        struct qedf_rport *fcport;
        u8 op = 0;

        if (io_req == NULL) {
                QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
                return;
        }

        fcport = io_req->fcport;
        if (io_req->fcport == NULL) {
                QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
                return;
        }

        qedf = fcport->qedf;

        switch (io_req->cmd_type) {
        case QEDF_ABTS:
                if (qedf == NULL) {
                        QEDF_INFO(NULL, QEDF_LOG_IO, "qedf is NULL for xid=0x%x.\n",
                            io_req->xid);
                        return;
                }

                QEDF_ERR((&qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
                    io_req->xid);
                /* Cleanup timed out ABTS */
                qedf_initiate_cleanup(io_req, true);
                complete(&io_req->abts_done);

                /*
                 * Need to call kref_put for reference taken when initiate_abts
                 * was called since abts_compl won't be called now that we've
                 * cleaned up the task.
                 */
                kref_put(&io_req->refcount, qedf_release_cmd);

                /*
                 * Now that the original I/O and the ABTS are complete see
                 * if we need to reconnect to the target.
                 */
                qedf_restart_rport(fcport);
                break;
        case QEDF_ELS:
                kref_get(&io_req->refcount);
                /*
                 * Don't attempt to clean up an ELS timeout as any subsequent
                 * ABTS or cleanup requests just hang.  For now just free
                 * the resources of the original I/O and the RRQ.
                 */
                QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
                          io_req->xid);
                io_req->event = QEDF_IOREQ_EV_ELS_TMO;
                /* Call callback function to complete command */
                if (io_req->cb_func && io_req->cb_arg) {
                        op = io_req->cb_arg->op;
                        io_req->cb_func(io_req->cb_arg);
                        io_req->cb_arg = NULL;
                }
                qedf_initiate_cleanup(io_req, true);
                kref_put(&io_req->refcount, qedf_release_cmd);
                break;
        case QEDF_SEQ_CLEANUP:
                QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, "
                    "xid=0x%x.\n", io_req->xid);
                qedf_initiate_cleanup(io_req, true);
                io_req->event = QEDF_IOREQ_EV_ELS_TMO;
                qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
                break;
        default:
                break;
        }
}

void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
{
        struct io_bdt *bdt_info;
        struct qedf_ctx *qedf = cmgr->qedf;
        size_t bd_tbl_sz;
        u16 min_xid = QEDF_MIN_XID;
        u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
        int num_ios;
        int i;
        struct qedf_ioreq *io_req;

        num_ios = max_xid - min_xid + 1;

        /* Free fcoe_bdt_ctx structures */
        if (!cmgr->io_bdt_pool)
                goto free_cmd_pool;

        bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
        for (i = 0; i < num_ios; i++) {
                bdt_info = cmgr->io_bdt_pool[i];
                if (bdt_info->bd_tbl) {
                        dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz,
                            bdt_info->bd_tbl, bdt_info->bd_tbl_dma);
                        bdt_info->bd_tbl = NULL;
                }
        }

        /* Destroy io_bdt pool */
        for (i = 0; i < num_ios; i++) {
                kfree(cmgr->io_bdt_pool[i]);
                cmgr->io_bdt_pool[i] = NULL;
        }

        kfree(cmgr->io_bdt_pool);
        cmgr->io_bdt_pool = NULL;

free_cmd_pool:

        for (i = 0; i < num_ios; i++) {
                io_req = &cmgr->cmds[i];
                kfree(io_req->sgl_task_params);
                kfree(io_req->task_params);
                /* Make sure we free per command sense buffer */
                if (io_req->sense_buffer)
                        dma_free_coherent(&qedf->pdev->dev,
                            QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
                            io_req->sense_buffer_dma);
                cancel_delayed_work_sync(&io_req->rrq_work);
        }

        /* Free command manager itself */
        vfree(cmgr);
}

static void qedf_handle_rrq(struct work_struct *work)
{
        struct qedf_ioreq *io_req =
            container_of(work, struct qedf_ioreq, rrq_work.work);

        qedf_send_rrq(io_req);
}

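/*
 * Allocate the command manager: one qedf_ioreq per task id (XID) in the
 * range [QEDF_MIN_XID, FCOE_PARAMS_NUM_TASKS - 1], each with a DMA-able
 * sense buffer, firmware task parameters, SGL task parameters and an
 * io_bdt buffer descriptor table.  Everything allocated here is torn
 * down by qedf_cmd_mgr_free() on error or unload.
 */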
struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
{
        struct qedf_cmd_mgr *cmgr;
        struct io_bdt *bdt_info;
        struct qedf_ioreq *io_req;
        u16 xid;
        int i;
        int num_ios;
        u16 min_xid = QEDF_MIN_XID;
        u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);

        /* Make sure num_queues is already set before calling this function */
        if (!qedf->num_queues) {
                QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n");
                return NULL;
        }

        if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
                QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and "
                           "max_xid 0x%x.\n", min_xid, max_xid);
                return NULL;
        }

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid "
                   "0x%x.\n", min_xid, max_xid);

        num_ios = max_xid - min_xid + 1;

        cmgr = vzalloc(sizeof(struct qedf_cmd_mgr));
        if (!cmgr) {
                QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n");
                return NULL;
        }

        cmgr->qedf = qedf;
        spin_lock_init(&cmgr->lock);

        /*
         * Initialize I/O request fields.
         */
        xid = QEDF_MIN_XID;

        for (i = 0; i < num_ios; i++) {
                io_req = &cmgr->cmds[i];
                INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);

                io_req->xid = xid++;

                INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);

                /* Allocate DMA memory to hold sense buffer */
                io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
                    QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
                    GFP_KERNEL);
                if (!io_req->sense_buffer)
                        goto mem_err;
                /* Allocate task parameters to pass to f/w init functions */
                io_req->task_params = kzalloc(sizeof(*io_req->task_params),
                                              GFP_KERNEL);
                if (!io_req->task_params) {
                        QEDF_ERR(&(qedf->dbg_ctx),
                                 "Failed to allocate task_params for xid=0x%x\n",
                                 i);
                        goto mem_err;
                }

                /*
                 * Allocate scatter/gather list info to pass to f/w init
                 * functions.
                 */
                io_req->sgl_task_params = kzalloc(
                    sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
                if (!io_req->sgl_task_params) {
                        QEDF_ERR(&(qedf->dbg_ctx),
                                 "Failed to allocate sgl_task_params for xid=0x%x\n",
                                 i);
                        goto mem_err;
                }
        }

        /* Allocate pool of io_bdts - one for each qedf_ioreq */
        cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *),
            GFP_KERNEL);

        if (!cmgr->io_bdt_pool) {
                QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n");
                goto mem_err;
        }

        for (i = 0; i < num_ios; i++) {
                cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
                    GFP_KERNEL);
                if (!cmgr->io_bdt_pool[i]) {
                        QEDF_WARN(&(qedf->dbg_ctx),
                                  "Failed to alloc io_bdt_pool[%d].\n", i);
                        goto mem_err;
                }
        }

        for (i = 0; i < num_ios; i++) {
                bdt_info = cmgr->io_bdt_pool[i];
                bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
                    QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
                    &bdt_info->bd_tbl_dma, GFP_KERNEL);
                if (!bdt_info->bd_tbl) {
                        QEDF_WARN(&(qedf->dbg_ctx),
                                  "Failed to alloc bdt_tbl[%d].\n", i);
                        goto mem_err;
                }
        }
        atomic_set(&cmgr->free_list_cnt, num_ios);
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
            "cmgr->free_list_cnt=%d.\n",
            atomic_read(&cmgr->free_list_cnt));

        return cmgr;

mem_err:
        qedf_cmd_mgr_free(cmgr);
        return NULL;
}

struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
{
        struct qedf_ctx *qedf = fcport->qedf;
        struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr;
        struct qedf_ioreq *io_req = NULL;
        struct io_bdt *bd_tbl;
        u16 xid;
        uint32_t free_sqes;
        int i;
        unsigned long flags;

        free_sqes = atomic_read(&fcport->free_sqes);

        if (!free_sqes) {
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
                    "Returning NULL, free_sqes=%d.\n",
                    free_sqes);
                goto out_failed;
        }

        /* Limit the number of outstanding R/W tasks */
        if ((atomic_read(&fcport->num_active_ios) >=
            NUM_RW_TASKS_PER_CONNECTION)) {
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
                    "Returning NULL, num_active_ios=%d.\n",
                    atomic_read(&fcport->num_active_ios));
                goto out_failed;
        }

        /* Limit global TIDs so that GBL_RSVD_TASKS entries stay reserved */
        if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) {
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
                    "Returning NULL, free_list_cnt=%d.\n",
                    atomic_read(&cmd_mgr->free_list_cnt));
                goto out_failed;
        }

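        /*
         * Round-robin search of the command array, starting at the last
         * allocation index, for an entry whose QEDF_CMD_OUTSTANDING bit
         * is clear; cmd_mgr->lock serializes the search and the bit set.
         */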
        spin_lock_irqsave(&cmd_mgr->lock, flags);
        for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
                io_req = &cmd_mgr->cmds[cmd_mgr->idx];
                cmd_mgr->idx++;
                if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS)
                        cmd_mgr->idx = 0;

                /* Check to make sure command was previously freed */
                if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags))
                        break;
        }

        if (i == FCOE_PARAMS_NUM_TASKS) {
                spin_unlock_irqrestore(&cmd_mgr->lock, flags);
                goto out_failed;
        }

        set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
        spin_unlock_irqrestore(&cmd_mgr->lock, flags);

        atomic_inc(&fcport->num_active_ios);
        atomic_dec(&fcport->free_sqes);
        xid = io_req->xid;
        atomic_dec(&cmd_mgr->free_list_cnt);

        io_req->cmd_mgr = cmd_mgr;
        io_req->fcport = fcport;

        /* Hold the io_req against deletion */
        kref_init(&io_req->refcount);

        /* Bind io_bdt for this io_req */
        /* Have a static link between io_req and io_bdt_pool */
        bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
        if (bd_tbl == NULL) {
                QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid);
                kref_put(&io_req->refcount, qedf_release_cmd);
                goto out_failed;
        }
        bd_tbl->io_req = io_req;
        io_req->cmd_type = cmd_type;
        io_req->tm_flags = 0;

        /* Reset sequence offset data */
        io_req->rx_buf_off = 0;
        io_req->tx_buf_off = 0;
        io_req->rx_id = 0xffff; /* No RX_ID assigned yet */

        return io_req;

out_failed:
        /* Record failure for stats and return NULL to caller */
        qedf->alloc_failures++;
        return NULL;
}

static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
{
        struct qedf_mp_req *mp_req = &(io_req->mp_req);
        struct qedf_ctx *qedf = io_req->fcport->qedf;
        uint64_t sz = sizeof(struct scsi_sge);

        /* Free the request/response BDs and buffers */
        if (mp_req->mp_req_bd) {
                dma_free_coherent(&qedf->pdev->dev, sz,
                    mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
                mp_req->mp_req_bd = NULL;
        }
        if (mp_req->mp_resp_bd) {
                dma_free_coherent(&qedf->pdev->dev, sz,
                    mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma);
                mp_req->mp_resp_bd = NULL;
        }
        if (mp_req->req_buf) {
                dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
                    mp_req->req_buf, mp_req->req_buf_dma);
                mp_req->req_buf = NULL;
        }
        if (mp_req->resp_buf) {
                dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
                    mp_req->resp_buf, mp_req->resp_buf_dma);
                mp_req->resp_buf = NULL;
        }
}

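/*
 * kref release callback, invoked from kref_put() when the last reference
 * to an io_req is dropped: frees any mid-path resources and returns the
 * command to the free pool by clearing QEDF_CMD_OUTSTANDING.
 */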
void qedf_release_cmd(struct kref *ref)
{
        struct qedf_ioreq *io_req =
            container_of(ref, struct qedf_ioreq, refcount);
        struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
        struct qedf_rport *fcport = io_req->fcport;

        if (io_req->cmd_type == QEDF_ELS ||
            io_req->cmd_type == QEDF_TASK_MGMT_CMD)
                qedf_free_mp_resc(io_req);

        atomic_inc(&cmd_mgr->free_list_cnt);
        atomic_dec(&fcport->num_active_ios);
        if (atomic_read(&fcport->num_active_ios) < 0)
                QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");

        /* Increment task retry identifier now that the request is released */
        io_req->task_retry_identifier++;

        clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
}

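/*
 * Split one DMA-mapped S/G element into buffer descriptors of at most
 * QEDF_BD_SPLIT_SZ bytes each, starting at bd_index.  Returns the number
 * of descriptors written.
 */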
static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
        int bd_index)
{
        struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
        int frag_size, sg_frags;

        sg_frags = 0;
        while (sg_len) {
                if (sg_len > QEDF_BD_SPLIT_SZ)
                        frag_size = QEDF_BD_SPLIT_SZ;
                else
                        frag_size = sg_len;
                bd[bd_index + sg_frags].sge_addr.lo = U64_LO(addr);
                bd[bd_index + sg_frags].sge_addr.hi = U64_HI(addr);
                bd[bd_index + sg_frags].sge_len = (uint16_t)frag_size;

                addr += (u64)frag_size;
                sg_frags++;
                sg_len -= frag_size;
        }
        return sg_frags;
}

static int qedf_map_sg(struct qedf_ioreq *io_req)
{
        struct scsi_cmnd *sc = io_req->sc_cmd;
        struct Scsi_Host *host = sc->device->host;
        struct fc_lport *lport = shost_priv(host);
        struct qedf_ctx *qedf = lport_priv(lport);
        struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
        struct scatterlist *sg;
        int byte_count = 0;
        int sg_count = 0;
        int bd_count = 0;
        int sg_frags;
        unsigned int sg_len;
        u64 addr, end_addr;
        int i;

        sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
            scsi_sg_count(sc), sc->sc_data_direction);

        sg = scsi_sglist(sc);

        /*
         * New condition to send single SGE as cached-SGL with length less
         * than 64k.
         */
        if ((sg_count == 1) && (sg_dma_len(sg) <=
            QEDF_MAX_SGLEN_FOR_CACHESGL)) {
                sg_len = sg_dma_len(sg);
                addr = (u64)sg_dma_address(sg);

                bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
                bd[bd_count].sge_addr.hi = (addr >> 32);
                bd[bd_count].sge_len = (u16)sg_len;

                return ++bd_count;
        }

        scsi_for_each_sg(sc, sg, sg_count, i) {
                sg_len = sg_dma_len(sg);
                addr = (u64)sg_dma_address(sg);
                end_addr = (u64)(addr + sg_len);

                /*
                 * First s/g element in the list, so check if the end_addr
                 * is page aligned. Also check to make sure the length is
                 * at least page size.
                 */
                if ((i == 0) && (sg_count > 1) &&
                    ((end_addr % QEDF_PAGE_SIZE) ||
                    sg_len < QEDF_PAGE_SIZE))
                        io_req->use_slowpath = true;
                /*
                 * Last s/g element, so check if the start address is page
                 * aligned.
                 */
                else if ((i == (sg_count - 1)) && (sg_count > 1) &&
                    (addr % QEDF_PAGE_SIZE))
                        io_req->use_slowpath = true;
                /*
                 * Intermediate s/g element, so check if the start and end
                 * addresses are page aligned.
                 */
                else if ((i != 0) && (i != (sg_count - 1)) &&
                    ((addr % QEDF_PAGE_SIZE) || (end_addr % QEDF_PAGE_SIZE)))
                        io_req->use_slowpath = true;

                if (sg_len > QEDF_MAX_BD_LEN) {
                        sg_frags = qedf_split_bd(io_req, addr, sg_len,
                            bd_count);
                } else {
                        sg_frags = 1;
                        bd[bd_count].sge_addr.lo = U64_LO(addr);
                        bd[bd_count].sge_addr.hi = U64_HI(addr);
                        bd[bd_count].sge_len = (uint16_t)sg_len;
                }

                bd_count += sg_frags;
                byte_count += sg_len;
        }

        if (byte_count != scsi_bufflen(sc))
                QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
                          "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
                           scsi_bufflen(sc), io_req->xid);

        return bd_count;
}

static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
{
        struct scsi_cmnd *sc = io_req->sc_cmd;
        struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
        int bd_count;

        if (scsi_sg_count(sc)) {
                bd_count = qedf_map_sg(io_req);
                if (bd_count == 0)
                        return -ENOMEM;
        } else {
                bd_count = 0;
                bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
                bd[0].sge_len = 0;
        }
        io_req->bd_tbl->bd_valid = bd_count;

        return 0;
}

static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
                                  struct fcp_cmnd *fcp_cmnd)
{
        struct scsi_cmnd *sc_cmd = io_req->sc_cmd;

        /* fcp_cmnd is 32 bytes */
        memset(fcp_cmnd, 0, FCP_CMND_LEN);

        /* 8 bytes: SCSI LUN info */
        int_to_scsilun(sc_cmd->device->lun,
                        (struct scsi_lun *)&fcp_cmnd->fc_lun);

        /* 4 bytes: flag info */
        fcp_cmnd->fc_pri_ta = 0;
        fcp_cmnd->fc_tm_flags = io_req->tm_flags;
        fcp_cmnd->fc_flags = io_req->io_req_flags;
        fcp_cmnd->fc_cmdref = 0;

        /* Populate data direction */
        if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
                fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
        } else {
                if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
                        fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
                else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
                        fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
        }

        fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;

        /* 16 bytes: CDB information */
        if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
                memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);

        /* 4 bytes: FCP data length */
        fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
}

static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
        struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx,
        struct fcoe_wqe *sqe)
{
        enum fcoe_task_type task_type;
        struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
        struct io_bdt *bd_tbl = io_req->bd_tbl;
        u8 fcp_cmnd[32];
        u32 tmp_fcp_cmnd[8];
        int bd_count = 0;
        struct qedf_ctx *qedf = fcport->qedf;
        uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
        struct regpair sense_data_buffer_phys_addr;
        u32 tx_io_size = 0;
        u32 rx_io_size = 0;
        int i, cnt;

        /* Note init_initiator_rw_fcoe_task memsets the task context */
        io_req->task = task_ctx;
        memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
        memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
        memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
        /* Set task type based on the DMA direction of the command */
        if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
                task_type = FCOE_TASK_TYPE_READ_INITIATOR;
        } else {
                if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
                        task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
                        tx_io_size = io_req->data_xfer_len;
                } else {
                        task_type = FCOE_TASK_TYPE_READ_INITIATOR;
                        rx_io_size = io_req->data_xfer_len;
                }
        }

        /* Setup the fields for fcoe_task_params */
        io_req->task_params->context = task_ctx;
        io_req->task_params->sqe = sqe;
        io_req->task_params->task_type = task_type;
        io_req->task_params->tx_io_size = tx_io_size;
        io_req->task_params->rx_io_size = rx_io_size;
        io_req->task_params->conn_cid = fcport->fw_cid;
        io_req->task_params->itid = io_req->xid;
        io_req->task_params->cq_rss_number = cq_idx;
        io_req->task_params->is_tape_device = fcport->dev_type;

        /* Fill in information for scatter/gather list */
        if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
                bd_count = bd_tbl->bd_valid;
                io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
                io_req->sgl_task_params->sgl_phys_addr.lo =
                        U64_LO(bd_tbl->bd_tbl_dma);
                io_req->sgl_task_params->sgl_phys_addr.hi =
                        U64_HI(bd_tbl->bd_tbl_dma);
                io_req->sgl_task_params->num_sges = bd_count;
                io_req->sgl_task_params->total_buffer_size =
                    scsi_bufflen(io_req->sc_cmd);
                io_req->sgl_task_params->small_mid_sge =
                        io_req->use_slowpath;
        }

        /* Fill in physical address of sense buffer */
        sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
        sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);

        /* fill FCP_CMND IU */
        qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);

        /* Swap fcp_cmnd since FC is big endian */
        cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
        for (i = 0; i < cnt; i++) {
                tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
        }
        memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));

        init_initiator_rw_fcoe_task(io_req->task_params,
                                    io_req->sgl_task_params,
                                    sense_data_buffer_phys_addr,
                                    io_req->task_retry_identifier, fcp_cmnd);

        /* Increment SGL type counters */
        if (bd_count == 1) {
                qedf->single_sge_ios++;
                io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
        } else if (io_req->use_slowpath) {
                qedf->slow_sge_ios++;
                io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
        } else {
                qedf->fast_sge_ios++;
                io_req->sge_type = QEDF_IOREQ_FAST_SGE;
        }
}

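/*
 * Initialize a mid-path (e.g. ELS) task.  Unlike regular SCSI commands,
 * mid-path requests use the preallocated single-SGE request/response
 * buffers in io_req->mp_req and always complete on CQ 0.
 */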
void qedf_init_mp_task(struct qedf_ioreq *io_req,
        struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
{
        struct qedf_mp_req *mp_req = &(io_req->mp_req);
        struct qedf_rport *fcport = io_req->fcport;
        struct qedf_ctx *qedf = io_req->fcport->qedf;
        struct fc_frame_header *fc_hdr;
        struct fcoe_tx_mid_path_params task_fc_hdr;
        struct scsi_sgl_task_params tx_sgl_task_params;
        struct scsi_sgl_task_params rx_sgl_task_params;

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
                  "Initializing MP task for cmd_type=%d\n",
                  io_req->cmd_type);

        qedf->control_requests++;

        memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
        memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
        memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
        memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));

        /* Setup the task from io_req for easy reference */
        io_req->task = task_ctx;

        /* Setup the fields for fcoe_task_params */
        io_req->task_params->context = task_ctx;
        io_req->task_params->sqe = sqe;
        io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
        io_req->task_params->tx_io_size = io_req->data_xfer_len;
        /* rx_io_size tells the f/w how large a response buffer we have */
        io_req->task_params->rx_io_size = PAGE_SIZE;
        io_req->task_params->conn_cid = fcport->fw_cid;
        io_req->task_params->itid = io_req->xid;
        /* Return middle path commands on CQ 0 */
        io_req->task_params->cq_rss_number = 0;
        io_req->task_params->is_tape_device = fcport->dev_type;

        fc_hdr = &(mp_req->req_fc_hdr);
        /* Set OX_ID and RX_ID based on driver task id */
        fc_hdr->fh_ox_id = io_req->xid;
        fc_hdr->fh_rx_id = htons(0xffff);

        /* Set up FC header information */
        task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
        task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
        task_fc_hdr.type = fc_hdr->fh_type;
        task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
        task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
        task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
        task_fc_hdr.ox_id = fc_hdr->fh_ox_id;

        /* Set up s/g list parameters for the request buffer */
        tx_sgl_task_params.sgl = mp_req->mp_req_bd;
        tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
        tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
        tx_sgl_task_params.num_sges = 1;
        /* The transmit buffer size is the request payload length */
        tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
        tx_sgl_task_params.small_mid_sge = 0;

        /* Set up s/g list parameters for the response buffer */
        rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
        rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
        rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
        rx_sgl_task_params.num_sges = 1;
        /* Use PAGE_SIZE since the response SG element is that size */
        rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
        rx_sgl_task_params.small_mid_sge = 0;

        /*
         * The last argument is 0 as the previous code did not request the
         * FC header information.
         */
        init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
                                                     &task_fc_hdr,
                                                     &tx_sgl_task_params,
                                                     &rx_sgl_task_params, 0);

        /* Midpath requests always consume 1 SGE */
        qedf->single_sge_ios++;
}

/* Presumed that fcport->rport_lock is held */
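/*
 * Returns the current SQ producer index and advances it.  sq_prod_idx
 * wraps at the ring size, while fw_sq_prod_idx (the value later written
 * to the doorbell) is left free-running.
 */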
u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
{
        uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
        u16 rval;

        rval = fcport->sq_prod_idx;

        /* Adjust ring index */
        fcport->sq_prod_idx++;
        fcport->fw_sq_prod_idx++;
        if (fcport->sq_prod_idx == total_sqe)
                fcport->sq_prod_idx = 0;

        return rval;
}

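/*
 * Notify the firmware of new SQEs by writing the 32-bit doorbell data
 * (destination, aggregation command and the new SQ producer index) to
 * the connection's doorbell address.
 */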
void qedf_ring_doorbell(struct qedf_rport *fcport)
{
        struct fcoe_db_data dbell = { 0 };

        dbell.agg_flags = 0;

        dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT;
        dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT;
        dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD <<
            FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;

        dbell.sq_prod = fcport->fw_sq_prod_idx;
        writel(*(u32 *)&dbell, fcport->p_doorbell);
        /* Make sure SQ index is updated so f/w processes requests in order */
        wmb();
        mmiowb();
}

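/*
 * Record an I/O request or response in the circular trace buffer of
 * QEDF_IO_TRACE_SIZE entries, used when qedf_io_tracing is enabled.
 */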
static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
                          int8_t direction)
{
        struct qedf_ctx *qedf = fcport->qedf;
        struct qedf_io_log *io_log;
        struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
        unsigned long flags;
        uint8_t op;

        spin_lock_irqsave(&qedf->io_trace_lock, flags);

        io_log = &qedf->io_trace_buf[qedf->io_trace_idx];
        io_log->direction = direction;
        io_log->task_id = io_req->xid;
        io_log->port_id = fcport->rdata->ids.port_id;
        io_log->lun = sc_cmd->device->lun;
        io_log->op = op = sc_cmd->cmnd[0];
        io_log->lba[0] = sc_cmd->cmnd[2];
        io_log->lba[1] = sc_cmd->cmnd[3];
        io_log->lba[2] = sc_cmd->cmnd[4];
        io_log->lba[3] = sc_cmd->cmnd[5];
        io_log->bufflen = scsi_bufflen(sc_cmd);
        io_log->sg_count = scsi_sg_count(sc_cmd);
        io_log->result = sc_cmd->result;
        io_log->jiffies = jiffies;
        io_log->refcount = kref_read(&io_req->refcount);

        if (direction == QEDF_IO_TRACE_REQ) {
                /* For requests we only care about the submission CPU */
                io_log->req_cpu = io_req->cpu;
                io_log->int_cpu = 0;
                io_log->rsp_cpu = 0;
        } else if (direction == QEDF_IO_TRACE_RSP) {
                io_log->req_cpu = io_req->cpu;
                io_log->int_cpu = io_req->int_cpu;
                io_log->rsp_cpu = smp_processor_id();
        }

        io_log->sge_type = io_req->sge_type;

        qedf->io_trace_idx++;
        if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE)
                qedf->io_trace_idx = 0;

        spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
}

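/*
 * Post a SCSI command to the firmware.  The caller (qedf_queuecommand)
 * must hold fcport->rport_lock while posting to the send queue.
 */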
int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
{
        struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
        struct Scsi_Host *host = sc_cmd->device->host;
        struct fc_lport *lport = shost_priv(host);
        struct qedf_ctx *qedf = lport_priv(lport);
        struct e4_fcoe_task_context *task_ctx;
        u16 xid;
        enum fcoe_task_type req_type = 0;
        struct fcoe_wqe *sqe;
        u16 sqe_idx;

        /* Initialize the rest of the io_req fields */
        io_req->data_xfer_len = scsi_bufflen(sc_cmd);
        sc_cmd->SCp.ptr = (char *)io_req;
        io_req->use_slowpath = false; /* Assume fast SGL by default */

        /* Record which cpu this request is associated with */
        io_req->cpu = smp_processor_id();

        if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
                req_type = FCOE_TASK_TYPE_READ_INITIATOR;
                io_req->io_req_flags = QEDF_READ;
                qedf->input_requests++;
        } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
                req_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
                io_req->io_req_flags = QEDF_WRITE;
                qedf->output_requests++;
        } else {
                io_req->io_req_flags = 0;
                qedf->control_requests++;
        }

        xid = io_req->xid;

        /* Build buffer descriptor list for firmware from sg list */
        if (qedf_build_bd_list_from_sg(io_req)) {
                QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
                kref_put(&io_req->refcount, qedf_release_cmd);
                return -EAGAIN;
        }

        if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
                QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
                kref_put(&io_req->refcount, qedf_release_cmd);
                return -EINVAL;
        }

        /* Obtain free SQE */
        sqe_idx = qedf_get_sqe_idx(fcport);
        sqe = &fcport->sq[sqe_idx];
        memset(sqe, 0, sizeof(struct fcoe_wqe));

        /* Get the task context */
        task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
        if (!task_ctx) {
                QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
                           xid);
                kref_put(&io_req->refcount, qedf_release_cmd);
                return -EINVAL;
        }

        qedf_init_task(fcport, lport, io_req, task_ctx, sqe);

        /* Ring doorbell */
        qedf_ring_doorbell(fcport);

        if (qedf_io_tracing && io_req->sc_cmd)
                qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);

        return 0;
}

int
qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
{
        struct fc_lport *lport = shost_priv(host);
        struct qedf_ctx *qedf = lport_priv(lport);
        struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
        struct fc_rport_libfc_priv *rp = rport->dd_data;
        struct qedf_rport *fcport;
        struct qedf_ioreq *io_req;
        int rc = 0;
        int rval;
        unsigned long flags = 0;

        if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
            test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
                sc_cmd->result = DID_NO_CONNECT << 16;
                sc_cmd->scsi_done(sc_cmd);
                return 0;
        }

        if (!qedf->pdev->msix_enabled) {
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
                    "Completing sc_cmd=%p DID_NO_CONNECT as MSI-X is not enabled.\n",
                    sc_cmd);
                sc_cmd->result = DID_NO_CONNECT << 16;
                sc_cmd->scsi_done(sc_cmd);
                return 0;
        }

        rval = fc_remote_port_chkready(rport);
        if (rval) {
                sc_cmd->result = rval;
                sc_cmd->scsi_done(sc_cmd);
                return 0;
        }

        /* Retry command if we are doing a qed drain operation */
        if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto exit_qcmd;
        }

        if (lport->state != LPORT_ST_READY ||
            atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto exit_qcmd;
        }

        /* rport and tgt are allocated together, so tgt should be non-NULL */
        fcport = (struct qedf_rport *)&rp[1];

        if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
                /*
                 * Session is not offloaded yet. Let SCSI-ml retry
                 * the command.
                 */
                rc = SCSI_MLQUEUE_TARGET_BUSY;
                goto exit_qcmd;
        }
        if (fcport->retry_delay_timestamp) {
                if (time_after(jiffies, fcport->retry_delay_timestamp)) {
                        fcport->retry_delay_timestamp = 0;
                } else {
                        /* If retry_delay timer is active, flow off the ML */
                        rc = SCSI_MLQUEUE_TARGET_BUSY;
                        goto exit_qcmd;
                }
        }

        io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
        if (!io_req) {
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto exit_qcmd;
        }

        io_req->sc_cmd = sc_cmd;

        /* Take fcport->rport_lock for posting to fcport send queue */
        spin_lock_irqsave(&fcport->rport_lock, flags);
        if (qedf_post_io_req(fcport, io_req)) {
                QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
                /* Return SQE to pool */
                atomic_inc(&fcport->free_sqes);
                rc = SCSI_MLQUEUE_HOST_BUSY;
        }
        spin_unlock_irqrestore(&fcport->rport_lock, flags);

exit_qcmd:
        return rc;
}

1024
1025 static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
1026                                  struct fcoe_cqe_rsp_info *fcp_rsp)
1027 {
1028         struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1029         struct qedf_ctx *qedf = io_req->fcport->qedf;
1030         u8 rsp_flags = fcp_rsp->rsp_flags.flags;
1031         int fcp_sns_len = 0;
1032         int fcp_rsp_len = 0;
1033         uint8_t *rsp_info, *sense_data;
1034
1035         io_req->fcp_status = FC_GOOD;
1036         io_req->fcp_resid = 0;
1037         if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
1038             FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
1039                 io_req->fcp_resid = fcp_rsp->fcp_resid;
1040
1041         io_req->scsi_comp_flags = rsp_flags;
1042         CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
1043             fcp_rsp->scsi_status_code;
1044
1045         if (rsp_flags &
1046             FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
1047                 fcp_rsp_len = fcp_rsp->fcp_rsp_len;
1048
1049         if (rsp_flags &
1050             FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID)
1051                 fcp_sns_len = fcp_rsp->fcp_sns_len;
1052
1053         io_req->fcp_rsp_len = fcp_rsp_len;
1054         io_req->fcp_sns_len = fcp_sns_len;
1055         rsp_info = sense_data = io_req->sense_buffer;
1056
1057         /* fetch fcp_rsp_code */
1058         if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
1059                 /* Only for task management function */
1060                 io_req->fcp_rsp_code = rsp_info[3];
1061                 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1062                     "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
1063                 /* Adjust sense-data location. */
1064                 sense_data += fcp_rsp_len;
1065         }
1066
1067         if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
1068                 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1069                     "Truncating sense buffer\n");
1070                 fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
1071         }
1072
1073         /* The sense buffer can be NULL for TMF commands */
1074         if (sc_cmd->sense_buffer) {
1075                 memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1076                 if (fcp_sns_len)
1077                         memcpy(sc_cmd->sense_buffer, sense_data,
1078                             fcp_sns_len);
1079         }
1080 }
1081
static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
{
        struct scsi_cmnd *sc = io_req->sc_cmd;

        if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
                dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc),
                    scsi_sg_count(sc), sc->sc_data_direction);
                io_req->bd_tbl->bd_valid = 0;
        }
}

void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
        struct qedf_ioreq *io_req)
{
        u16 xid, rval;
        struct e4_fcoe_task_context *task_ctx;
        struct scsi_cmnd *sc_cmd;
        struct fcoe_cqe_rsp_info *fcp_rsp;
        struct qedf_rport *fcport;
        int refcount;
        u16 scope, qualifier = 0;
        u8 fw_residual_flag = 0;

        if (!io_req)
                return;
        if (!cqe)
                return;

        xid = io_req->xid;
        task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
        sc_cmd = io_req->sc_cmd;
        fcp_rsp = &cqe->cqe_info.rsp_info;

        if (!sc_cmd) {
                QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
                return;
        }

        if (!sc_cmd->SCp.ptr) {
                QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
                    "another context.\n");
                return;
        }

        if (!sc_cmd->request) {
                QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
                    "sc_cmd=%p.\n", sc_cmd);
                return;
        }

        if (!sc_cmd->request->special) {
                QEDF_WARN(&(qedf->dbg_ctx), "request->special is NULL so "
                    "request not valid, sc_cmd=%p.\n", sc_cmd);
                return;
        }

        if (!sc_cmd->request->q) {
                QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
                   "is not valid, sc_cmd=%p.\n", sc_cmd);
                return;
        }

        fcport = io_req->fcport;

        qedf_parse_fcp_rsp(io_req, fcp_rsp);

        qedf_unmap_sg_list(qedf, io_req);

        /* Check for FCP transport error */
        if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
                QEDF_ERR(&(qedf->dbg_ctx),
                    "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d "
                    "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
                    io_req->fcp_rsp_code);
                sc_cmd->result = DID_BUS_BUSY << 16;
                goto out;
        }

        fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
            FCOE_CQE_RSP_INFO_FW_UNDERRUN);
        if (fw_residual_flag) {
                QEDF_ERR(&(qedf->dbg_ctx),
                    "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x "
                    "fcp_resid=%d fw_residual=0x%x.\n", io_req->xid,
                    fcp_rsp->rsp_flags.flags, io_req->fcp_resid,
                    cqe->cqe_info.rsp_info.fw_residual);

                if (io_req->cdb_status == 0)
                        sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
                else
                        sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;

                /* Abort the command since we did not get all the data */
                init_completion(&io_req->abts_done);
                rval = qedf_initiate_abts(io_req, true);
                if (rval) {
                        QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
                        sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
                }

                /*
                 * Set resid to the whole buffer length so we won't try to
                 * reuse any previously read data.
                 */
                scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
                goto out;
        }

        switch (io_req->fcp_status) {
        case FC_GOOD:
                if (io_req->cdb_status == 0) {
                        /* Good I/O completion */
                        sc_cmd->result = DID_OK << 16;
                } else {
                        refcount = kref_read(&io_req->refcount);
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
                            "%d:0:%d:%lld xid=0x%0x op=0x%02x "
                            "lba=%02x%02x%02x%02x cdb_status=%d "
                            "fcp_resid=0x%x refcount=%d.\n",
                            qedf->lport->host->host_no, sc_cmd->device->id,
                            sc_cmd->device->lun, io_req->xid,
                            sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3],
                            sc_cmd->cmnd[4], sc_cmd->cmnd[5],
                            io_req->cdb_status, io_req->fcp_resid,
                            refcount);
                        sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;

                        if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
                            io_req->cdb_status == SAM_STAT_BUSY) {
                                /*
                                 * Check whether we need to set retry_delay at
                                 * all based on retry_delay module parameter
                                 * and the status qualifier.
                                 */

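                                /*
                                 * The FCP_RSP retry delay timer field
                                 * carries a 2-bit scope and a 14-bit
                                 * qualifier in units of 100 ms, hence
                                 * the qualifier * HZ / 10 conversion
                                 * to jiffies below.
                                 */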
                                /* Upper 2 bits */
                                scope = fcp_rsp->retry_delay_timer & 0xC000;
                                /* Lower 14 bits */
                                qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;

                                if (qedf_retry_delay &&
                                    scope > 0 && qualifier > 0 &&
                                    qualifier <= 0x3FEF) {
                                        /* Check we don't go over the max */
                                        if (qualifier > QEDF_RETRY_DELAY_MAX)
                                                qualifier =
                                                    QEDF_RETRY_DELAY_MAX;
                                        fcport->retry_delay_timestamp =
                                            jiffies + (qualifier * HZ / 10);
                                }
                                /* Record stats */
                                if (io_req->cdb_status ==
                                    SAM_STAT_TASK_SET_FULL)
                                        qedf->task_set_fulls++;
                                else
                                        qedf->busy++;
                        }
                }
                if (io_req->fcp_resid)
                        scsi_set_resid(sc_cmd, io_req->fcp_resid);
                break;
        default:
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
                           io_req->fcp_status);
                break;
        }

out:
        if (qedf_io_tracing)
                qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);

        io_req->sc_cmd = NULL;
        sc_cmd->SCp.ptr = NULL;
        sc_cmd->scsi_done(sc_cmd);
        kref_put(&io_req->refcount, qedf_release_cmd);
}

/* Return a SCSI command in some other context besides a normal completion */
void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
        int result)
{
        u16 xid;
        struct scsi_cmnd *sc_cmd;
        int refcount;

        if (!io_req)
                return;

        xid = io_req->xid;
        sc_cmd = io_req->sc_cmd;

        if (!sc_cmd) {
                QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
                return;
        }

        if (!sc_cmd->SCp.ptr) {
                QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
                    "another context.\n");
                return;
        }

        qedf_unmap_sg_list(qedf, io_req);

        sc_cmd->result = result << 16;
        refcount = kref_read(&io_req->refcount);
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
            "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
            "allowed=%d retries=%d refcount=%d.\n",
            qedf->lport->host->host_no, sc_cmd->device->id,
            sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0],
            sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4],
            sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries,
            refcount);

        /*
         * Set resid to the whole buffer length so we won't try to reuse any
         * previously read data.
         */
        scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));

        if (qedf_io_tracing)
                qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);

        io_req->sc_cmd = NULL;
        sc_cmd->SCp.ptr = NULL;
        sc_cmd->scsi_done(sc_cmd);
        kref_put(&io_req->refcount, qedf_release_cmd);
}

/*
 * Handle warning type CQE completions. This is mainly used for REC timer
 * popping.
 */
void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
        struct qedf_ioreq *io_req)
{
        int rval, i;
        struct qedf_rport *fcport = io_req->fcport;
        u64 err_warn_bit_map;
        u8 err_warn = 0xff;

        if (!cqe)
                return;

        QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
                  "xid=0x%x\n", io_req->xid);
        QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
                  "err_warn_bitmap=%08x:%08x\n",
                  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
                  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
        QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
                  "rx_buff_off=%08x, rx_id=%04x\n",
                  le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
                  le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
                  le32_to_cpu(cqe->cqe_info.err_info.rx_id));

        /* Combine the two 32-bit halves into a single 64-bit bitmap value */
        err_warn_bit_map = (u64)
            ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) |
            (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo;
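        /* The lowest set bit in the combined bitmap is the warning code */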
        for (i = 0; i < 64; i++) {
                if (err_warn_bit_map & (u64)((u64)1 << i)) {
                        err_warn = i;
                        break;
                }
        }

        /* Check if REC TOV expired if this is a tape device */
        if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
                if (err_warn ==
                    FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) {
                        QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n");
                        if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
                                io_req->rx_buf_off =
                                    cqe->cqe_info.err_info.rx_buf_off;
                                io_req->tx_buf_off =
                                    cqe->cqe_info.err_info.tx_buf_off;
                                io_req->rx_id = cqe->cqe_info.err_info.rx_id;
                                rval = qedf_send_rec(io_req);
                                /*
                                 * We only want to abort the io_req if we
                                 * can't queue the REC command as we want to
                                 * keep the exchange open for recovery.
                                 */
                                if (rval)
                                        goto send_abort;
                        }
                        return;
                }
        }

send_abort:
        init_completion(&io_req->abts_done);
        rval = qedf_initiate_abts(io_req, true);
        if (rval)
                QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
}

/* Cleanup a command when we receive an error detection completion */
void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
        struct qedf_ioreq *io_req)
{
        int rval;

        if (!cqe)
                return;

        QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
                  "xid=0x%x\n", io_req->xid);
        QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
                  "err_warn_bitmap=%08x:%08x\n",
                  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
                  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
        QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
                  "rx_buff_off=%08x, rx_id=%04x\n",
                  le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
                  le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
                  le32_to_cpu(cqe->cqe_info.err_info.rx_id));

        if (qedf->stop_io_on_error) {
                qedf_stop_all_io(qedf);
                return;
        }

        init_completion(&io_req->abts_done);
        rval = qedf_initiate_abts(io_req, true);
        if (rval)
                QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
}

1413 static void qedf_flush_els_req(struct qedf_ctx *qedf,
1414         struct qedf_ioreq *els_req)
1415 {
1416         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1417             "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid,
1418             kref_read(&els_req->refcount));
1419
1420         /*
1421          * Need to distinguish this from a timeout when calling the
1422          * els_req->cb_func.
1423          */
1424         els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;
1425
1426         /* Cancel the timer */
1427         cancel_delayed_work_sync(&els_req->timeout_work);
1428
1429         /* Call callback function to complete command */
1430         if (els_req->cb_func && els_req->cb_arg) {
1431                 els_req->cb_func(els_req->cb_arg);
1432                 els_req->cb_arg = NULL;
1433         }
1434
1435         /* Release kref for original initiate_els */
1436         kref_put(&els_req->refcount, qedf_release_cmd);
1437 }
1438
1439 /* A value of -1 for lun is a wildcard that means flush all
1440  * active SCSI I/Os for the target.
1441  */
1442 void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
1443 {
1444         struct qedf_ioreq *io_req;
1445         struct qedf_ctx *qedf;
1446         struct qedf_cmd_mgr *cmd_mgr;
1447         int i, rc;
1448
1449         if (!fcport)
1450                 return;
1451
1452         /* Check that fcport is still offloaded */
1453         if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1454                 QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
1455                 return;
1456         }
1457
1458         qedf = fcport->qedf;
1459         cmd_mgr = qedf->cmd_mgr;
1460
1461         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Flushing active I/Os.\n");
1462
1463         for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
1464                 io_req = &cmd_mgr->cmds[i];
1465
1466                 if (!io_req)
1467                         continue;
1468                 if (io_req->fcport != fcport)
1469                         continue;
1470                 if (io_req->cmd_type == QEDF_ELS) {
1471                         rc = kref_get_unless_zero(&io_req->refcount);
1472                         if (!rc) {
1473                                 QEDF_ERR(&(qedf->dbg_ctx),
1474                                     "Could not get kref for ELS io_req=0x%p xid=0x%x.\n",
1475                                     io_req, io_req->xid);
1476                                 continue;
1477                         }
1478                         qedf_flush_els_req(qedf, io_req);
1479                         /*
1480                          * Release the kref and go back to the top of the
1481                          * loop.
1482                          */
1483                         goto free_cmd;
1484                 }
1485
1486                 if (io_req->cmd_type == QEDF_ABTS) {
1487                         rc = kref_get_unless_zero(&io_req->refcount);
1488                         if (!rc) {
1489                                 QEDF_ERR(&(qedf->dbg_ctx),
1490                                     "Could not get kref for abort io_req=0x%p xid=0x%x.\n",
1491                                     io_req, io_req->xid);
1492                                 continue;
1493                         }
1494                         QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1495                             "Flushing abort xid=0x%x.\n", io_req->xid);
1496
1497                         clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1498
1499                         if (io_req->sc_cmd) {
1500                                 if (io_req->return_scsi_cmd_on_abts)
1501                                         qedf_scsi_done(qedf, io_req, DID_ERROR);
1502                         }
1503
1504                         /* Notify eh_abort handler that ABTS is complete */
1505                         complete(&io_req->abts_done);
1506                         kref_put(&io_req->refcount, qedf_release_cmd);
1507
1508                         goto free_cmd;
1509                 }
1510
1511                 if (!io_req->sc_cmd)
1512                         continue;
1513         if (lun > -1) {
1514                         if (io_req->sc_cmd->device->lun !=
1515                             (u64)lun)
1516                                 continue;
1517                 }
1518
1519                 /*
1520                  * Use kref_get_unless_zero in the unlikely case the command
1521                  * we're about to flush was completed in the normal SCSI path
1522                  */
1523                 rc = kref_get_unless_zero(&io_req->refcount);
1524                 if (!rc) {
1525                         QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
1526                             "io_req=0x%p xid=0x%x\n", io_req, io_req->xid);
1527                         continue;
1528                 }
1529                 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1530                     "Cleanup xid=0x%x.\n", io_req->xid);
1531
1532                 /* Cleanup task and return I/O mid-layer */
1533                 qedf_initiate_cleanup(io_req, true);
1534
1535 free_cmd:
1536                 kref_put(&io_req->refcount, qedf_release_cmd);
1537         }
1538 }
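/*
 * Sketch of the LUN wildcard convention qedf_flush_active_ios() uses:
 * -1 matches any LUN, otherwise the SCSI command's LUN must match
 * exactly. This helper is illustrative only (the driver open-codes
 * the check in the loop above).
 */
static inline bool qedf_lun_matches(int lun, struct scsi_cmnd *sc_cmd)
{
        return lun == -1 || sc_cmd->device->lun == (u64)lun;
}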
1539
1540 /*
1541  * Initiate an ABTS middle path command. Note that we don't have to initialize
1542  * the task context for an ABTS task.
1543  */
1544 int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
1545 {
1546         struct fc_lport *lport;
1547         struct qedf_rport *fcport = io_req->fcport;
1548         struct fc_rport_priv *rdata;
1549         struct qedf_ctx *qedf;
1550         u16 xid;
1551         u32 r_a_tov = 0;
1552         int rc = 0;
1553         unsigned long flags;
1554         struct fcoe_wqe *sqe;
1555         u16 sqe_idx;
1556
1557         /* Sanity check qedf_rport before dereferencing any pointers */
1558         if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1559                 QEDF_ERR(NULL, "tgt not offloaded\n");
1560                 rc = 1;
1561                 goto abts_err;
1562         }
1563
1564         rdata = fcport->rdata;
1565         r_a_tov = rdata->r_a_tov;
1566         qedf = fcport->qedf;
1567         lport = qedf->lport;
1568
1569         if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
1570                 QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
1571                 rc = 1;
1572                 goto abts_err;
1573         }
1574
1575         if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
1576                 QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
1577                 rc = 1;
1578                 goto abts_err;
1579         }
1580
1581         /* Ensure room on SQ */
1582         if (!atomic_read(&fcport->free_sqes)) {
1583                 QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
1584                 rc = 1;
1585                 goto abts_err;
1586         }
1587
1588         if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
1589                 QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");
1590                 rc = 1;
1591                 goto out;
1592         }
1593
1594         if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1595             test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
1596             test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
1597                 QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
1598                           "cleanup or abort processing or already "
1599                           "completed.\n", io_req->xid);
1600                 rc = 1;
1601                 goto out;
1602         }
1603
1604         kref_get(&io_req->refcount);
1605
1606         xid = io_req->xid;
1607         qedf->control_requests++;
1608         qedf->packet_aborts++;
1609
1610         /* Set the return CPU to be the same as the request one */
1611         io_req->cpu = smp_processor_id();
1612
1613         /* Set the command type to abort */
1614         io_req->cmd_type = QEDF_ABTS;
1615         io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
1616
1617         set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1618         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "ABTS io_req xid = "
1619                    "0x%x\n", xid);
1620
1621         qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT * HZ);
1622
1623         spin_lock_irqsave(&fcport->rport_lock, flags);
1624
1625         sqe_idx = qedf_get_sqe_idx(fcport);
1626         sqe = &fcport->sq[sqe_idx];
1627         memset(sqe, 0, sizeof(struct fcoe_wqe));
1628         io_req->task_params->sqe = sqe;
1629
1630         init_initiator_abort_fcoe_task(io_req->task_params);
1631         qedf_ring_doorbell(fcport);
1632
1633         spin_unlock_irqrestore(&fcport->rport_lock, flags);
1634
1635         return rc;
1636 abts_err:
1637         /*
1638          * If the ABTS task fails to queue then we need to cleanup the
1639          * task at the firmware.
1640          */
1641         qedf_initiate_cleanup(io_req, return_scsi_cmd_on_abts);
1642 out:
1643         return rc;
1644 }
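/*
 * The SQE posting sequence used above (take rport_lock, claim an SQ
 * index, zero the WQE, point the task params at it, initialize, ring
 * the doorbell) is repeated by the ABTS, cleanup and TMF paths. A
 * sketch of it as a shared helper, assuming the init callback fills
 * the WQE from the task params; this helper is hypothetical and the
 * driver open-codes the sequence instead.
 */
static __maybe_unused void qedf_post_one_wqe(struct qedf_rport *fcport,
        struct fcoe_task_params *task_params,
        void (*init_wqe)(struct fcoe_task_params *))
{
        unsigned long flags;
        struct fcoe_wqe *sqe;
        u16 sqe_idx;

        spin_lock_irqsave(&fcport->rport_lock, flags);

        sqe_idx = qedf_get_sqe_idx(fcport);
        sqe = &fcport->sq[sqe_idx];
        memset(sqe, 0, sizeof(struct fcoe_wqe));
        task_params->sqe = sqe;

        init_wqe(task_params);
        qedf_ring_doorbell(fcport);

        spin_unlock_irqrestore(&fcport->rport_lock, flags);
}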
1645
1646 void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1647         struct qedf_ioreq *io_req)
1648 {
1649         uint32_t r_ctl;
1650         uint16_t xid;
1651
1652         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
1653                    "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);
1654
1655         cancel_delayed_work(&io_req->timeout_work);
1656
1657         xid = io_req->xid;
1658         r_ctl = cqe->cqe_info.abts_info.r_ctl;
1659
1660         switch (r_ctl) {
1661         case FC_RCTL_BA_ACC:
1662                 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
1663                     "ABTS response - ACC Send RRQ after R_A_TOV\n");
1664                 io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
1665                 /*
1666                  * Don't release this cmd yet. It will be released
1667                  * after we get the RRQ response.
1668                  */
1669                 kref_get(&io_req->refcount);
1670                 queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
1671                     msecs_to_jiffies(qedf->lport->r_a_tov));
1672                 break;
1673         /* For error cases let the cleanup return the command */
1674         case FC_RCTL_BA_RJT:
1675                 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
1676                    "ABTS response - RJT\n");
1677                 io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;
1678                 break;
1679         default:
1680                 QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n");
1681                 break;
1682         }
1683
1684         clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1685
1686         if (io_req->sc_cmd) {
1687                 if (io_req->return_scsi_cmd_on_abts)
1688                         qedf_scsi_done(qedf, io_req, DID_ERROR);
1689         }
1690
1691         /* Notify eh_abort handler that ABTS is complete */
1692         complete(&io_req->abts_done);
1693
1694         kref_put(&io_req->refcount, qedf_release_cmd);
1695 }
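/*
 * Reference lifecycle for an aborted exchange, summarizing the kref
 * calls above and in qedf_initiate_abts() (a recap, not new
 * behaviour):
 *
 *      qedf_initiate_abts()        kref_get()  ABTS now outstanding
 *      qedf_process_abts_compl()   kref_put()  ABTS done
 *        FC_RCTL_BA_ACC only:      kref_get()  held across R_A_TOV
 *        rrq_work (after R_A_TOV)              sends the RRQ; per the
 *                                              comment above, the ref
 *                                              is dropped once the RRQ
 *                                              response arrives
 */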
1696
1697 int qedf_init_mp_req(struct qedf_ioreq *io_req)
1698 {
1699         struct qedf_mp_req *mp_req;
1700         struct scsi_sge *mp_req_bd;
1701         struct scsi_sge *mp_resp_bd;
1702         struct qedf_ctx *qedf = io_req->fcport->qedf;
1703         dma_addr_t addr;
1704         uint64_t sz;
1705
1706         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n");
1707
1708         mp_req = (struct qedf_mp_req *)&(io_req->mp_req);
1709         memset(mp_req, 0, sizeof(struct qedf_mp_req));
1710
1711         if (io_req->cmd_type != QEDF_ELS) {
1712                 mp_req->req_len = sizeof(struct fcp_cmnd);
1713                 io_req->data_xfer_len = mp_req->req_len;
1714         } else {
1715                 mp_req->req_len = io_req->data_xfer_len;
1716         }
1717         mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
1718             &mp_req->req_buf_dma, GFP_KERNEL);
1719         if (!mp_req->req_buf) {
1720                 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n");
1721                 qedf_free_mp_resc(io_req);
1722                 return -ENOMEM;
1723         }
1724
1725         mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev,
1726             QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL);
1727         if (!mp_req->resp_buf) {
1728                 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp "
1729                           "buffer\n");
1730                 qedf_free_mp_resc(io_req);
1731                 return -ENOMEM;
1732         }
1733
1734         /* Allocate and map mp_req_bd and mp_resp_bd */
1735         sz = sizeof(struct scsi_sge);
1736         mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
1737             &mp_req->mp_req_bd_dma, GFP_KERNEL);
1738         if (!mp_req->mp_req_bd) {
1739                 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n");
1740                 qedf_free_mp_resc(io_req);
1741                 return -ENOMEM;
1742         }
1743
1744         mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
1745             &mp_req->mp_resp_bd_dma, GFP_KERNEL);
1746         if (!mp_req->mp_resp_bd) {
1747                 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n");
1748                 qedf_free_mp_resc(io_req);
1749                 return -ENOMEM;
1750         }
1751
1752         /* Fill bd table */
1753         addr = mp_req->req_buf_dma;
1754         mp_req_bd = mp_req->mp_req_bd;
1755         mp_req_bd->sge_addr.lo = U64_LO(addr);
1756         mp_req_bd->sge_addr.hi = U64_HI(addr);
1757         mp_req_bd->sge_len = QEDF_PAGE_SIZE;
1758
1759         /*
1760          * MP buffer is either a task mgmt command or an ELS.
1761          * So the assumption is that it consumes a single bd
1762          * entry in the bd table
1763          */
1764         mp_resp_bd = mp_req->mp_resp_bd;
1765         addr = mp_req->resp_buf_dma;
1766         mp_resp_bd->sge_addr.lo = U64_LO(addr);
1767         mp_resp_bd->sge_addr.hi = U64_HI(addr);
1768         mp_resp_bd->sge_len = QEDF_PAGE_SIZE;
1769
1770         return 0;
1771 }
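/*
 * Sketch of the single-SGE fill pattern used twice above, assuming
 * the U64_LO()/U64_HI() macros split a dma_addr_t as elsewhere in
 * this file. The helper is illustrative and not part of the driver.
 */
static inline void qedf_fill_mp_sge(struct scsi_sge *sge, dma_addr_t addr)
{
        sge->sge_addr.lo = U64_LO(addr);
        sge->sge_addr.hi = U64_HI(addr);
        sge->sge_len = QEDF_PAGE_SIZE;
}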
1772
1773 /*
1774  * Last-ditch effort to clear the port if it's stuck. Used only after a
1775  * cleanup task times out.
1776  */
1777 static void qedf_drain_request(struct qedf_ctx *qedf)
1778 {
1779         if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
1780                 QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n");
1781                 return;
1782         }
1783
1784         /* Set bit to return all queuecommand requests as busy */
1785         set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
1786
1787         /* Call qed drain request for function. Should be synchronous */
1788         qed_ops->common->drain(qedf->cdev);
1789
1790         /* Settle time for CQEs to be returned */
1791         msleep(100);
1792
1793         /* Unplug and continue */
1794         clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
1795 }
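/*
 * The QEDF_DRAIN_ACTIVE bit set above is honoured on the submission
 * side: while it is set, queuecommand pushes new requests back to the
 * midlayer, per the "return all queuecommand requests as busy" comment
 * above. A sketch of that guard (illustrative; the real gating lives
 * where queuecommand is implemented, not here):
 *
 *      if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags))
 *              return SCSI_MLQUEUE_HOST_BUSY;
 */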
1796
1797 /*
1798  * Returns SUCCESS if the cleanup task does not time out, otherwise returns
1799  * FAILED.
1800  */
1801 int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
1802         bool return_scsi_cmd_on_abts)
1803 {
1804         struct qedf_rport *fcport;
1805         struct qedf_ctx *qedf;
1806         uint16_t xid;
1807         struct e4_fcoe_task_context *task;
1808         int tmo = 0;
1809         int rc = SUCCESS;
1810         unsigned long flags;
1811         struct fcoe_wqe *sqe;
1812         u16 sqe_idx;
1813
1814         fcport = io_req->fcport;
1815         if (!fcport) {
1816                 QEDF_ERR(NULL, "fcport is NULL.\n");
1817                 return SUCCESS;
1818         }
1819
1820         /* Sanity check qedf_rport before dereferencing any pointers */
1821         if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1822                 QEDF_ERR(NULL, "tgt not offloaded\n");
1823                 /* No firmware task left to clean if the session is gone */
1824                 return SUCCESS;
1825         }
1826
1827         qedf = fcport->qedf;
1828         if (!qedf) {
1829                 QEDF_ERR(NULL, "qedf is NULL.\n");
1830                 return SUCCESS;
1831         }
1832
1833         if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1834             test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
1835                 QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
1836                           "cleanup processing or already completed.\n",
1837                           io_req->xid);
1838                 return SUCCESS;
1839         }
1840
1841         /* Ensure room on SQ */
1842         if (!atomic_read(&fcport->free_sqes)) {
1843                 QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
1844                 return FAILED;
1845         }
1846
1848         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid=0x%x\n",
1849             io_req->xid);
1850
1851         /* Cleanup cmds re-use the same TID as the original I/O */
1852         xid = io_req->xid;
1853         io_req->cmd_type = QEDF_CLEANUP;
1854         io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
1855
1856         /* Set the return CPU to be the same as the request one */
1857         io_req->cpu = smp_processor_id();
1858
1859         set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
1860
1861         task = qedf_get_task_mem(&qedf->tasks, xid);
1862
1863         init_completion(&io_req->tm_done);
1864
1865         spin_lock_irqsave(&fcport->rport_lock, flags);
1866
1867         sqe_idx = qedf_get_sqe_idx(fcport);
1868         sqe = &fcport->sq[sqe_idx];
1869         memset(sqe, 0, sizeof(struct fcoe_wqe));
1870         io_req->task_params->sqe = sqe;
1871
1872         init_initiator_cleanup_fcoe_task(io_req->task_params);
1873         qedf_ring_doorbell(fcport);
1874
1875         spin_unlock_irqrestore(&fcport->rport_lock, flags);
1876
1877         tmo = wait_for_completion_timeout(&io_req->tm_done,
1878             QEDF_CLEANUP_TIMEOUT * HZ);
1879
1880         if (!tmo) {
1881                 rc = FAILED;
1882                 /* Timeout case */
1883                 QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, "
1884                           "xid=%x.\n", io_req->xid);
1885                 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
1886                 /* Issue a drain request if cleanup task times out */
1887                 QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n");
1888                 qedf_drain_request(qedf);
1889         }
1890
1891         if (io_req->sc_cmd) {
1892                 if (io_req->return_scsi_cmd_on_abts)
1893                         qedf_scsi_done(qedf, io_req, DID_ERROR);
1894         }
1895
1896         if (rc == SUCCESS)
1897                 io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
1898         else
1899                 io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;
1900
1901         return rc;
1902 }
1903
1904 void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1905         struct qedf_ioreq *io_req)
1906 {
1907         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n",
1908                    io_req->xid);
1909
1910         clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
1911
1912         /* Complete so we can finish cleaning up the I/O */
1913         complete(&io_req->tm_done);
1914 }
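/*
 * The cleanup path is a plain completion handshake: the submitter in
 * qedf_initiate_cleanup() does
 *
 *      init_completion(&io_req->tm_done);
 *      ...post the cleanup WQE...
 *      tmo = wait_for_completion_timeout(&io_req->tm_done,
 *          QEDF_CLEANUP_TIMEOUT * HZ);
 *
 * and the CQE handler above is the other half that signals tm_done.
 */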
1915
1916 static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
1917         uint8_t tm_flags)
1918 {
1919         struct qedf_ioreq *io_req;
1920         struct e4_fcoe_task_context *task;
1921         struct qedf_ctx *qedf = fcport->qedf;
1922         struct fc_lport *lport = qedf->lport;
1923         int rc = 0;
1924         uint16_t xid;
1925         int tmo = 0;
1926         unsigned long flags;
1927         struct fcoe_wqe *sqe;
1928         u16 sqe_idx;
1929
1930         if (!sc_cmd) {
1931                 QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");
1932                 return FAILED;
1933         }
1934
1935         if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1936                 QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
1937                 /* Cannot issue a TMF to a session that is not offloaded */
1938                 return FAILED;
1939         }
1940
1941         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "portid = 0x%x "
1942                    "tm_flags = %d\n", fcport->rdata->ids.port_id, tm_flags);
1943
1944         io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
1945         if (!io_req) {
1946                 QEDF_ERR(&(qedf->dbg_ctx), "Failed to alloc TMF io_req.\n");
1947                 rc = -EAGAIN;
1948                 goto reset_tmf_err;
1949         }
1950
1951         if (tm_flags == FCP_TMF_LUN_RESET)
1952                 qedf->lun_resets++;
1953         else if (tm_flags == FCP_TMF_TGT_RESET)
1954                 qedf->target_resets++;
1955
1956         /* Initialize rest of io_req fields */
1957         io_req->sc_cmd = sc_cmd;
1958         io_req->fcport = fcport;
1959         io_req->cmd_type = QEDF_TASK_MGMT_CMD;
1960
1961         /* Set the return CPU to be the same as the request one */
1962         io_req->cpu = smp_processor_id();
1963
1964         /* Set TM flags */
1965         io_req->io_req_flags = QEDF_READ;
1966         io_req->data_xfer_len = 0;
1967         io_req->tm_flags = tm_flags;
1968
1969         /* Default is to return a SCSI command when an error occurs */
1970         io_req->return_scsi_cmd_on_abts = true;
1971
1972         /* Obtain exchange id */
1973         xid = io_req->xid;
1974
1975         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = "
1976                    "0x%x\n", xid);
1977
1978         /* Initialize task context for this IO request */
1979         task = qedf_get_task_mem(&qedf->tasks, xid);
1980
1981         init_completion(&io_req->tm_done);
1982
1983         spin_lock_irqsave(&fcport->rport_lock, flags);
1984
1985         sqe_idx = qedf_get_sqe_idx(fcport);
1986         sqe = &fcport->sq[sqe_idx];
1987         memset(sqe, 0, sizeof(struct fcoe_wqe));
1988
1989         qedf_init_task(fcport, lport, io_req, task, sqe);
1990         qedf_ring_doorbell(fcport);
1991
1992         spin_unlock_irqrestore(&fcport->rport_lock, flags);
1993
1994         tmo = wait_for_completion_timeout(&io_req->tm_done,
1995             QEDF_TM_TIMEOUT * HZ);
1996
1997         if (!tmo) {
1998                 rc = FAILED;
1999                 QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
2000         } else {
2001                 /* Check TMF response code */
2002                 if (io_req->fcp_rsp_code == 0)
2003                         rc = SUCCESS;
2004                 else
2005                         rc = FAILED;
2006         }
2007
2008         if (tm_flags == FCP_TMF_LUN_RESET)
2009                 qedf_flush_active_ios(fcport, (int)sc_cmd->device->lun);
2010         else
2011                 qedf_flush_active_ios(fcport, -1);
2012
2013         kref_put(&io_req->refcount, qedf_release_cmd);
2014
2015         if (rc != SUCCESS) {
2016                 QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
2017                 rc = FAILED;
2018         } else {
2019                 QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
2020                 rc = SUCCESS;
2021         }
2022 reset_tmf_err:
2023         return rc;
2024 }
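/*
 * Sketch of the TMF response mapping applied above: a zero
 * fcp_rsp_code (FCP_TMF_CMPL) is the only success indication; any
 * other code fails the eh callback. Illustrative helper only, not
 * used by the driver.
 */
static inline int qedf_tmf_result(u8 fcp_rsp_code)
{
        return fcp_rsp_code == FCP_TMF_CMPL ? SUCCESS : FAILED;
}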
2025
2026 int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
2027 {
2028         struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
2029         struct fc_rport_libfc_priv *rp = rport->dd_data;
2030         struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
2031         struct qedf_ctx *qedf;
2032         struct fc_lport *lport;
2033         int rc = SUCCESS;
2034         int rval;
2035
2036         rval = fc_remote_port_chkready(rport);
2037
2038         if (rval) {
2039                 QEDF_ERR(NULL, "device_reset rport not ready\n");
2040                 rc = FAILED;
2041                 goto tmf_err;
2042         }
2043
2044         if (fcport == NULL) {
2045                 QEDF_ERR(NULL, "device_reset: rport is NULL\n");
2046                 rc = FAILED;
2047                 goto tmf_err;
2048         }
2049
2050         qedf = fcport->qedf;
2051         lport = qedf->lport;
2052
2053         if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
2054             test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
2055                 rc = SUCCESS;
2056                 goto tmf_err;
2057         }
2058
2059         if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
2060                 QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
2061                 rc = FAILED;
2062                 goto tmf_err;
2063         }
2064
2065         rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);
2066
2067 tmf_err:
2068         return rc;
2069 }
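/*
 * Note on the fcport lookup at the top of qedf_initiate_tmf(): the
 * &rp[1] arithmetic implies that the driver's per-rport data is
 * carved out of the same allocation as the libfc rport private data
 * and immediately follows it. Layout (a sketch; sizes not to scale):
 *
 *      rport->dd_data --> +-----------------------------+
 *                         | struct fc_rport_libfc_priv  |  rp[0]
 *                         +-----------------------------+
 *                         | struct qedf_rport           |  &rp[1]
 *                         +-----------------------------+
 */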
2070
2071 void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
2072         struct qedf_ioreq *io_req)
2073 {
2074         struct fcoe_cqe_rsp_info *fcp_rsp;
2075
2076         fcp_rsp = &cqe->cqe_info.rsp_info;
2077         qedf_parse_fcp_rsp(io_req, fcp_rsp);
2078
2079         io_req->sc_cmd = NULL;
2080         complete(&io_req->tm_done);
2081 }
2082
2083 void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
2084         struct fcoe_cqe *cqe)
2085 {
2086         unsigned long flags;
2087         uint16_t tmp;
2088         uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
2089         u32 payload_len, crc;
2090         struct fc_frame_header *fh;
2091         struct fc_frame *fp;
2092         struct qedf_io_work *io_work;
2093         u32 bdq_idx;
2094         void *bdq_addr;
2095         struct scsi_bd *p_bd_info;
2096
2097         p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
2098         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2099                   "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n",
2100                   le32_to_cpu(p_bd_info->address.hi),
2101                   le32_to_cpu(p_bd_info->address.lo),
2102                   le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi),
2103                   le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo),
2104                   qedf->bdq_prod_idx, pktlen);
2105
2106         bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo);
2107         if (bdq_idx >= QEDF_BDQ_SIZE) {
2108                 QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
2109                     bdq_idx);
2110                 goto increment_prod;
2111         }
2112
2113         bdq_addr = qedf->bdq[bdq_idx].buf_addr;
2114         if (!bdq_addr) {
2115                 QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping "
2116                     "unsolicited packet.\n");
2117                 goto increment_prod;
2118         }
2119
2120         if (qedf_dump_frames) {
2121                 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2122                     "BDQ frame is at addr=%p.\n", bdq_addr);
2123                 print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1,
2124                     (void *)bdq_addr, pktlen, false);
2125         }
2126
2127         /* Allocate frame */
2128         payload_len = pktlen - sizeof(struct fc_frame_header);
2129         fp = fc_frame_alloc(qedf->lport, payload_len);
2130         if (!fp) {
2131                 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n");
2132                 goto increment_prod;
2133         }
2134
2135         /* Copy data from BDQ buffer into fc_frame struct */
2136         fh = (struct fc_frame_header *)fc_frame_header_get(fp);
2137         memcpy(fh, (void *)bdq_addr, pktlen);
2138
2139         /* Initialize the frame so libfc sees it as a valid frame */
2140         crc = fcoe_fc_crc(fp);
2141         fc_frame_init(fp);
2142         fr_dev(fp) = qedf->lport;
2143         fr_sof(fp) = FC_SOF_I3;
2144         fr_eof(fp) = FC_EOF_T;
2145         fr_crc(fp) = cpu_to_le32(~crc);
2146
2147         /*
2148          * We need to return the frame back up to libfc in a non-atomic
2149          * context
2150          */
2151         io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
2152         if (!io_work) {
2153                 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
2154                            "work for I/O completion.\n");
2155                 fc_frame_free(fp);
2156                 goto increment_prod;
2157         }
2158         memset(io_work, 0, sizeof(struct qedf_io_work));
2159
2160         INIT_WORK(&io_work->work, qedf_fp_io_handler);
2161
2162         /* Copy contents of CQE for deferred processing */
2163         memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));
2164
2165         io_work->qedf = qedf;
2166         io_work->fp = fp;
2167
2168         queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work);
2169 increment_prod:
2170         spin_lock_irqsave(&qedf->hba_lock, flags);
2171
2172         /* Increment producer to let f/w know we've handled the frame */
2173         qedf->bdq_prod_idx++;
2174
2175         /* Producer index wraps at uint16_t boundary */
2176         if (qedf->bdq_prod_idx == 0xffff)
2177                 qedf->bdq_prod_idx = 0;
2178
2179         writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
2180         tmp = readw(qedf->bdq_primary_prod);
2181         writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
2182         tmp = readw(qedf->bdq_secondary_prod);
2183
2184         spin_unlock_irqrestore(&qedf->hba_lock, flags);
2185 }
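/*
 * Sketch of the BDQ producer update that closes the handler above,
 * pulled out as a helper for clarity. The caller must hold hba_lock;
 * the read-backs exist only to flush the posted writes to the
 * producer registers. Hypothetical helper, not used by the driver.
 */
static __maybe_unused void qedf_bdq_bump_prod(struct qedf_ctx *qedf)
{
        u16 tmp;

        qedf->bdq_prod_idx++;

        /* Producer index wraps at 0xffff, one short of the full u16 range */
        if (qedf->bdq_prod_idx == 0xffff)
                qedf->bdq_prod_idx = 0;

        writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
        tmp = readw(qedf->bdq_primary_prod);
        writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
        tmp = readw(qedf->bdq_secondary_prod);
        (void)tmp;
}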