/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 *
 * drivers/scsi/qla2xxx/qla_nvme.c (GNU Linux-libre 4.14.251-gnu1)
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

static void qla_nvme_unregister_remote_port(struct work_struct *);

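/*
 * Register an FC-NVMe remote port for this fcport with the FC-NVMe
 * transport. A port is registered only if host NVMe is enabled and the
 * PRLI service parameters advertise a target or discovery role; the
 * rport entry is tracked on vha->nvme_rport_list.
 */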
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct nvme_rport *rport;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	if (fcport->nvme_flag & NVME_FLAG_REGISTERED)
		return 0;

	if (!vha->flags.nvme_enabled) {
		ql_log(ql_log_info, vha, 0x2100,
		    "%s: Not registering target since Host NVME is not enabled\n",
		    __func__);
		return 0;
	}

	if (!(fcport->nvme_prli_service_param &
	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)))
		return 0;

	INIT_WORK(&fcport->nvme_del_work, qla_nvme_unregister_remote_port);
	rport = kzalloc(sizeof(*rport), GFP_KERNEL);
	if (!rport) {
		ql_log(ql_log_warn, vha, 0x2101,
		    "%s: unable to alloc memory\n", __func__);
		return -ENOMEM;
	}

	rport->req.port_name = wwn_to_u64(fcport->port_name);
	rport->req.node_name = wwn_to_u64(fcport->node_name);
	rport->req.port_role = 0;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
		rport->req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
		rport->req.port_role |= FC_PORT_ROLE_NVME_TARGET;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
		rport->req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	rport->req.port_id = fcport->d_id.b24;

	ql_log(ql_log_info, vha, 0x2102,
	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
	    __func__, rport->req.node_name, rport->req.port_name,
	    rport->req.port_id);

	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &rport->req,
	    &fcport->nvme_remote_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x212e,
		    "Failed to register remote port. Transport returned %d\n",
		    ret);
		kfree(rport);	/* don't leak the rport entry on failure */
		return ret;
	}

	fcport->nvme_remote_port->private = fcport;
	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
	rport->fcport = fcport;
	list_add_tail(&rport->list, &vha->nvme_rport_list);
	return 0;
}

/* Allocate (or look up) a hardware queue pair for NVMe traffic. */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	/* Map admin queue and 1st IO queue to index 0 */
	if (qidx)
		qidx--;

	vha = (struct scsi_qla_host *)lport->private;
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0x2104,
	    "%s: handle %p, idx=%d, qsize %d\n",
	    __func__, handle, qidx, qsize);

	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
		ql_log(ql_log_warn, vha, 0x212f,
		    "%s: Illegal qidx=%d. Max=%d\n",
		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
		return -EINVAL;
	}

	if (ha->queue_pair_map[qidx]) {
		*handle = ha->queue_pair_map[qidx];
		ql_log(ql_log_info, vha, 0x2121,
		    "Returning existing qpair of %p for idx=%x\n",
		    *handle, qidx);
		return 0;
	}

	ql_log(ql_log_warn, vha, 0xffff,
	    "allocating q for idx=%x w/o cpu mask\n", qidx);
	qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
	if (!qpair) {
		ql_log(ql_log_warn, vha, 0x2122,
		    "Failed to allocate qpair\n");
		return -EINVAL;
	}
	*handle = qpair;

	return 0;
}

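/*
 * SRB completion callback for an NVMe LS request: on the final
 * reference drop, record the completion status, defer the transport's
 * done() callback to a workqueue, and release the SRB.
 */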
static void qla_nvme_sp_ls_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct srb_iocb *nvme;
	struct nvmefc_ls_req *fd;
	struct nvme_private *priv;

	if (atomic_read(&sp->ref_count) == 0) {
		ql_log(ql_log_warn, sp->fcport->vha, 0x2123,
		    "SP reference-count to ZERO on LS_done -- sp=%p.\n", sp);
		return;
	}

	if (!atomic_dec_and_test(&sp->ref_count))
		return;

	if (res)
		res = -EINVAL;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;
	priv = fd->private;
	priv->comp_status = res;
	schedule_work(&priv->ls_work);
	/* the scheduled work does not need the sp, so release it now */
	qla2x00_rel_sp(sp);
}

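/*
 * Complete an NVMe I/O from the qpair's done list: invoke the FC-NVMe
 * transport's done() callback and release the SRB.
 */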
void qla_nvme_cmpl_io(struct srb_iocb *nvme)
{
	srb_t *sp;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;

	sp = container_of(nvme, srb_t, u.iocb_cmd);
	fd->done(fd);
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

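/*
 * SRB completion callback for an NVMe FCP command: translate the
 * driver result into an NVMe status and queue the request on the
 * qpair's nvme_done_list for deferred completion.
 */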
static void qla_nvme_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct srb_iocb *nvme;
	struct nvmefc_fcp_req *fd;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;

	if (!atomic_dec_and_test(&sp->ref_count))
		return;

	if (!(sp->fcport->nvme_flag & NVME_FLAG_REGISTERED))
		goto rel;

	if (unlikely(res == QLA_FUNCTION_FAILED))
		fd->status = NVME_SC_INTERNAL;
	else
		fd->status = 0;

	fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
	list_add_tail(&nvme->u.nvme.entry, &sp->qpair->nvme_done_list);
	return;
rel:
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

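/* FC-NVMe transport entry point: abort a pending LS request. */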
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct nvme_private *priv = fd->private;
	fc_port_t *fcport = rport->private;
	srb_t *sp = priv->sp;
	int rval;
	struct qla_hw_data *ha = fcport->vha->hw;

	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
	    "%s: %s LS command for sp=%p on fcport=%p rval=%x\n", __func__,
	    (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
	    sp, fcport, rval);
}

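/* Worker that delivers a deferred LS completion to the transport. */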
static void qla_nvme_ls_complete(struct work_struct *work)
{
	struct nvme_private *priv =
	    container_of(work, struct nvme_private, ls_work);
	struct nvmefc_ls_req *fd = priv->fd;

	fd->done(fd, priv->comp_status);
}

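/*
 * FC-NVMe transport entry point: issue an LS request. Allocates an
 * SRB, maps the request buffer for DMA and starts the IOCB.
 */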
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	fc_port_t *fcport = rport->private;
	struct srb_iocb *nvme;
	struct nvme_private *priv = fd->private;
	struct scsi_qla_host *vha;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha;
	srb_t *sp;

	if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return rval;

	vha = fcport->vha;
	ha = vha->hw;
	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		return rval;

	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_ls_done;
	atomic_set(&sp->ref_count, 1);
	nvme = &sp->u.iocb_cmd;
	priv->sp = sp;
	priv->fd = fd;
	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
	nvme->u.nvme.desc = fd;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.dl = 0;
	nvme->u.nvme.cmd_len = fd->rqstlen;
	nvme->u.nvme.rsp_len = fd->rsplen;
	nvme->u.nvme.rsp_dma = fd->rspdma;
	nvme->u.nvme.timeout_sec = fd->timeout;
	nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
	    fd->rqstlen, DMA_TO_DEVICE);
	if (dma_mapping_error(&ha->pdev->dev, nvme->u.nvme.cmd_dma)) {
		/* Defensive check (assumption, not in the 4.14 source):
		 * release the SRB if the DMA mapping failed rather than
		 * starting an IOCB with a bad address. */
		qla2x00_rel_sp(sp);
		return rval;
	}
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
	    fd->rqstlen, DMA_TO_DEVICE);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		atomic_dec(&sp->ref_count);
		wake_up(&sp->nvme_ls_waitq);
		return rval;
	}

	return rval;
}

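/* FC-NVMe transport entry point: abort an outstanding FCP command. */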
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	struct nvme_private *priv = fd->private;
	srb_t *sp = priv->sp;
	int rval;
	fc_port_t *fcport = rport->private;
	struct qla_hw_data *ha = fcport->vha->hw;

	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_io, fcport->vha, 0x2127,
	    "%s: %s command for sp=%p on fcport=%p rval=%x\n", __func__,
	    (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
	    sp, fcport, rval);
}

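/*
 * FC-NVMe transport poll entry point: drain the qpair's response
 * queue under the qpair lock.
 */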
static void qla_nvme_poll(struct nvme_fc_local_port *lport, void *hw_queue_handle)
{
	struct scsi_qla_host *vha = lport->private;
	unsigned long flags;
	struct qla_qpair *qpair = hw_queue_handle;

	/* Acquire ring specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);
	qla24xx_process_response_queue(vha, qpair->rsp);
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
}

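/*
 * Build and post a Command Type NVME IOCB (plus any Continuation Type
 * 1 IOCBs for extra data segments) on the qpair's request ring.
 * Returns QLA_SUCCESS, or -1 if no ring slot or handle is available.
 */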
static int qla2x00_start_nvme_mq(srb_t *sp)
{
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_nvme *cmd_pkt;
	uint16_t cnt, i;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;
	struct srb_iocb *nvme = &sp->u.iocb_cmd;
	struct scatterlist *sgl, *sg;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
	uint32_t rval = QLA_SUCCESS;

	tot_dsds = fd->sg_cnt;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Setup qpair pointers */
	req = qpair->req;
	rsp = qpair->rsp;

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds) {
		rval = -1;
		goto queuing_error;
	}
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (req->cnt < (req_cnt + 2)) {
			rval = -1;
			goto queuing_error;
		}
	}

	if (unlikely(!fd->sqid)) {
		struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;

		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
			nvme->u.nvme.aen_op = 1;
			atomic_inc(&vha->hw->nvme_active_aen_cnt);
		}
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	cmd_pkt->entry_status = 0;

	/* Update entry type to indicate Command NVME IOCB */
	cmd_pkt->entry_type = COMMAND_NVME;

	/* Set the transfer direction; io_dir == 0 means no data transfer. */
	if (fd->io_dir == NVMEFC_FCP_READ) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_READ_DATA | CF_NVME_ENABLE);
		vha->qla_stats.input_bytes += fd->payload_length;
		vha->qla_stats.input_requests++;
	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_WRITE_DATA | CF_NVME_ENABLE);
		vha->qla_stats.output_bytes += fd->payload_length;
		vha->qla_stats.output_requests++;
	} else if (fd->io_dir == 0) {
		cmd_pkt->control_flags = cpu_to_le16(CF_NVME_ENABLE);
	}

	/* Set NPORT-ID */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* NVME RSP IU */
	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
	cmd_pkt->nvme_rsp_dseg_address[0] = cpu_to_le32(LSD(fd->rspdma));
	cmd_pkt->nvme_rsp_dseg_address[1] = cpu_to_le32(MSD(fd->rspdma));

	/* NVME CMND IU */
	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
	cmd_pkt->nvme_cmnd_dseg_address[0] = cpu_to_le32(LSD(fd->cmddma));
	cmd_pkt->nvme_cmnd_dseg_address[1] = cpu_to_le32(MSD(fd->cmddma));

	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

	/* One DSD is available in the Command Type NVME IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->nvme_data_dseg_address[0];
	sgl = fd->first_sgl;

	/* Load data segments */
	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */

			/* Adjust ring index */
			req->ring_index++;
			if (req->ring_index == req->length) {
				req->ring_index = 0;
				req->ring_ptr = req->ring;
			} else {
				req->ring_ptr++;
			}
			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
			*((uint32_t *)(&cont_pkt->entry_type)) =
			    cpu_to_le32(CONTINUE_A64_TYPE);

			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}

	/* Set total entry count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

queuing_error:
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
	return rval;
}

/* Post a command */
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	fc_port_t *fcport;
	struct srb_iocb *nvme;
	struct scsi_qla_host *vha;
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	struct qla_qpair *qpair = hw_queue_handle;
	struct nvme_private *priv;

	if (!fd) {
		ql_log(ql_log_warn, NULL, 0x2134, "NO NVMe FCP request\n");
		return rval;
	}

	priv = fd->private;
	fcport = rport->private;
	if (!fcport) {
		ql_log(ql_log_warn, NULL, 0x210e, "No fcport ptr\n");
		return rval;
	}

	vha = fcport->vha;
	if (!qpair || !(fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return -EBUSY;

	/* Alloc SRB structure */
	sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
	if (!sp)
		return -EIO;

	atomic_set(&sp->ref_count, 1);
	init_waitqueue_head(&sp->nvme_ls_waitq);
	priv->sp = sp;
	sp->type = SRB_NVME_CMD;
	sp->name = "nvme_cmd";
	sp->done = qla_nvme_sp_done;
	sp->qpair = qpair;
	nvme = &sp->u.iocb_cmd;
	nvme->u.nvme.desc = fd;

	rval = qla2x00_start_nvme_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x212d,
		    "qla2x00_start_nvme_mq failed = %d\n", rval);
		atomic_dec(&sp->ref_count);
		wake_up(&sp->nvme_ls_waitq);
		return -EIO;
	}

	return rval;
}

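/* Transport callback invoked when the local port delete completes. */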
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
	struct scsi_qla_host *vha = lport->private;

	ql_log(ql_log_info, vha, 0x210f,
	    "localport delete of %p completed.\n", vha->nvme_local_port);
	vha->nvme_local_port = NULL;
	complete(&vha->nvme_del_done);
}

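/*
 * Transport callback invoked when a remote port delete completes:
 * drop the matching rport entry and clear the fcport's NVMe state.
 */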
static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
	fc_port_t *fcport;
	struct nvme_rport *r_port, *trport;

	fcport = rport->private;
	fcport->nvme_remote_port = NULL;
	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;

	list_for_each_entry_safe(r_port, trport,
	    &fcport->vha->nvme_rport_list, list) {
		if (r_port->fcport == fcport) {
			list_del(&r_port->list);
			/* Free only the matched entry; after a full walk
			 * the cursor is not a valid allocation. */
			kfree(r_port);
			break;
		}
	}
	complete(&fcport->nvme_del_done);

	ql_log(ql_log_info, fcport->vha, 0x2110,
	    "remoteport_delete of %p completed.\n", fcport);
}

static struct nvme_fc_port_template qla_nvme_fc_transport = {
	.localport_delete	= qla_nvme_localport_delete,
	.remoteport_delete	= qla_nvme_remoteport_delete,
	.create_queue		= qla_nvme_alloc_queue,
	.delete_queue		= NULL,
	.ls_req			= qla_nvme_ls_req,
	.ls_abort		= qla_nvme_ls_abort,
	.fcp_io			= qla_nvme_post_cmd,
	.fcp_abort		= qla_nvme_fcp_abort,
	.poll_queue		= qla_nvme_poll,
	.max_hw_queues		= 8,
	.max_sgl_segments	= 128,
	.max_dif_sgl_segments	= 64,
	.dma_boundary		= 0xFFFFFFFF,
	.local_priv_sz		= 8,
	.remote_priv_sz		= 0,
	.lsrqst_priv_sz		= sizeof(struct nvme_private),
	.fcprqst_priv_sz	= sizeof(struct nvme_private),
};

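/*
 * Give an aborted SRB a short grace period to complete; returns
 * QLA_FUNCTION_FAILED if the SRB is still referenced afterwards.
 */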
#define NVME_ABORT_POLLING_PERIOD    2
static int qla_nvme_wait_on_command(srb_t *sp)
{
	int ret = QLA_SUCCESS;

	wait_event_timeout(sp->nvme_ls_waitq, (atomic_read(&sp->ref_count) > 1),
	    NVME_ABORT_POLLING_PERIOD*HZ);

	if (atomic_read(&sp->ref_count) > 1)
		ret = QLA_FUNCTION_FAILED;

	return ret;
}

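/* Wait up to 2s for the transport to finish deleting a remote port. */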
static int qla_nvme_wait_on_rport_del(fc_port_t *fcport)
{
	int ret = QLA_SUCCESS;
	int timeout;

	timeout = wait_for_completion_timeout(&fcport->nvme_del_done,
	    msecs_to_jiffies(2000));
	if (!timeout) {
		ret = QLA_FUNCTION_FAILED;
		ql_log(ql_log_info, fcport->vha, 0x2111,
		    "timed out waiting for fcport=%p to delete\n", fcport);
	}

	return ret;
}

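/* Abort an NVMe SRB and wait briefly for the abort to take effect. */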
void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp)
{
	int rval;

	rval = ha->isp_ops->abort_command(sp);
	if (!rval && !qla_nvme_wait_on_command(sp))
		ql_log(ql_log_warn, NULL, 0x2112,
		    "nvme_wait_on_command timed out waiting on sp=%p\n", sp);
}

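/*
 * Deferred work (fcport->nvme_del_work): unregister the fcport's
 * remote port from the FC-NVMe transport and wait for its deletion.
 */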
static void qla_nvme_unregister_remote_port(struct work_struct *work)
{
	struct fc_port *fcport = container_of(work, struct fc_port,
	    nvme_del_work);
	struct nvme_rport *rport, *trport;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ql_log(ql_log_warn, NULL, 0x2112,
	    "%s: unregister remoteport on %p\n", __func__, fcport);

	list_for_each_entry_safe(rport, trport,
	    &fcport->vha->nvme_rport_list, list) {
		if (rport->fcport == fcport) {
			ql_log(ql_log_info, fcport->vha, 0x2113,
			    "%s: fcport=%p\n", __func__, fcport);
			init_completion(&fcport->nvme_del_done);
			nvme_fc_unregister_remoteport(
			    fcport->nvme_remote_port);
			qla_nvme_wait_on_rport_del(fcport);
		}
	}
}

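/*
 * Tear down all FC-NVMe state for this host: unregister every remote
 * port, then the local port.
 */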
void qla_nvme_delete(struct scsi_qla_host *vha)
{
	struct nvme_rport *rport, *trport;
	fc_port_t *fcport;
	int nv_ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	list_for_each_entry_safe(rport, trport, &vha->nvme_rport_list, list) {
		fcport = rport->fcport;

		ql_log(ql_log_info, fcport->vha, 0x2114, "%s: fcport=%p\n",
		    __func__, fcport);

		init_completion(&fcport->nvme_del_done);
		nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
		qla_nvme_wait_on_rport_del(fcport);
	}

	if (vha->nvme_local_port) {
		init_completion(&vha->nvme_del_done);
		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
		if (nv_ret == 0)
			ql_log(ql_log_info, vha, 0x2116,
			    "unregistered localport=%p\n",
			    vha->nvme_local_port);
		else
			ql_log(ql_log_info, vha, 0x2115,
			    "Unregister of localport failed\n");
		wait_for_completion_timeout(&vha->nvme_del_done,
		    msecs_to_jiffies(5000));
	}
}

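/*
 * Register this HBA as an FC-NVMe local port with the transport,
 * capping max_hw_queues at ha->max_req_queues - 2.
 */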
void qla_nvme_register_hba(struct scsi_qla_host *vha)
{
	struct nvme_fc_port_template *tmpl;
	struct qla_hw_data *ha;
	struct nvme_fc_port_info pinfo;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ha = vha->hw;
	tmpl = &qla_nvme_fc_transport;

	WARN_ON(vha->nvme_local_port);
	WARN_ON(ha->max_req_queues < 3);

	qla_nvme_fc_transport.max_hw_queues =
	    min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
		(uint8_t)(ha->max_req_queues - 2));

	pinfo.node_name = wwn_to_u64(vha->node_name);
	pinfo.port_name = wwn_to_u64(vha->port_name);
	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	pinfo.port_id = vha->d_id.b24;

	ql_log(ql_log_info, vha, 0xffff,
	    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
	    pinfo.node_name, pinfo.port_name, pinfo.port_id);
	qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

	ret = nvme_fc_register_localport(&pinfo, tmpl,
	    get_device(&ha->pdev->dev), &vha->nvme_local_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "register_localport failed: ret=%x\n", ret);
		return;
	}
	vha->nvme_local_port->private = vha;
}