// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2017 QLogic Corporation
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

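/*
 * Register an fcport with the FC-NVMe transport as a remote port. Called
 * for fcports whose PRLI service parameters advertise NVMe target or
 * discovery support; registers the local port first if needed.
 */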
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
        struct qla_nvme_rport *rport;
        struct nvme_fc_port_info req;
        int ret;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return 0;

        if (!vha->flags.nvme_enabled) {
                ql_log(ql_log_info, vha, 0x2100,
                    "%s: Not registering target since Host NVME is not enabled\n",
                    __func__);
                return 0;
        }

        if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
                return 0;

        if (!(fcport->nvme_prli_service_param &
            (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
                (fcport->nvme_flag & NVME_FLAG_REGISTERED))
                return 0;

        fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

        memset(&req, 0, sizeof(struct nvme_fc_port_info));
        req.port_name = wwn_to_u64(fcport->port_name);
        req.node_name = wwn_to_u64(fcport->node_name);
        req.port_role = 0;
        req.dev_loss_tmo = 0;

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
                req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
                req.port_role |= FC_PORT_ROLE_NVME_TARGET;

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
                req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

        req.port_id = fcport->d_id.b24;

        ql_log(ql_log_info, vha, 0x2102,
            "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
            __func__, req.node_name, req.port_name,
            req.port_id);

        ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
            &fcport->nvme_remote_port);
        if (ret) {
                ql_log(ql_log_warn, vha, 0x212e,
                    "Failed to register remote port. Transport returned %d\n",
                    ret);
                return ret;
        }

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
                ql_log(ql_log_info, vha, 0x212a,
                       "PortID:%06x Supports SLER\n", req.port_id);

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL)
                ql_log(ql_log_info, vha, 0x212b,
                       "PortID:%06x Supports PI control\n", req.port_id);

        rport = fcport->nvme_remote_port->private;
        rport->fcport = fcport;

        fcport->nvme_flag |= NVME_FLAG_REGISTERED;
        return 0;
}

/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
        struct scsi_qla_host *vha;
        struct qla_hw_data *ha;
        struct qla_qpair *qpair;

        /* Map admin queue and 1st IO queue to index 0 */
        if (qidx)
                qidx--;

        vha = (struct scsi_qla_host *)lport->private;
        ha = vha->hw;

        ql_log(ql_log_info, vha, 0x2104,
            "%s: handle %p, idx =%d, qsize %d\n",
            __func__, handle, qidx, qsize);

        if (qidx > qla_nvme_fc_transport.max_hw_queues) {
                ql_log(ql_log_warn, vha, 0x212f,
                    "%s: Illegal qidx=%d. Max=%d\n",
                    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
                return -EINVAL;
        }

        /* Use base qpair if max_qpairs is 0 */
        if (!ha->max_qpairs) {
                qpair = ha->base_qpair;
        } else {
                if (ha->queue_pair_map[qidx]) {
                        *handle = ha->queue_pair_map[qidx];
                        ql_log(ql_log_info, vha, 0x2121,
                               "Returning existing qpair of %p for idx=%x\n",
                               *handle, qidx);
                        return 0;
                }

                qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
                if (!qpair) {
                        ql_log(ql_log_warn, vha, 0x2122,
                               "Failed to allocate qpair\n");
                        return -EINVAL;
                }
        }
        *handle = qpair;

        return 0;
}

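/*
 * kref release callback for an FCP command srb: detach the srb from its
 * nvme_private, fill in the transport completion status, call fd->done()
 * and return the srb to its qpair pool.
 */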
static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
{
        struct srb *sp = container_of(kref, struct srb, cmd_kref);
        struct nvme_private *priv = (struct nvme_private *)sp->priv;
        struct nvmefc_fcp_req *fd;
        struct srb_iocb *nvme;
        unsigned long flags;

        if (!priv)
                goto out;

        nvme = &sp->u.iocb_cmd;
        fd = nvme->u.nvme.desc;

        spin_lock_irqsave(&priv->cmd_lock, flags);
        priv->sp = NULL;
        sp->priv = NULL;
        if (priv->comp_status == QLA_SUCCESS) {
                fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
                fd->status = NVME_SC_SUCCESS;
        } else {
                fd->rcv_rsplen = 0;
                fd->transferred_length = 0;
                fd->status = NVME_SC_INTERNAL;
        }
        spin_unlock_irqrestore(&priv->cmd_lock, flags);

        fd->done(fd);
out:
        qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

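/*
 * kref release callback for an LS request srb: detach the srb from its
 * nvme_private, complete the LS request towards the transport and free
 * the srb.
 */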
static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
        struct srb *sp = container_of(kref, struct srb, cmd_kref);
        struct nvme_private *priv = (struct nvme_private *)sp->priv;
        struct nvmefc_ls_req *fd;
        unsigned long flags;

        if (!priv)
                goto out;

        spin_lock_irqsave(&priv->cmd_lock, flags);
        priv->sp = NULL;
        sp->priv = NULL;
        spin_unlock_irqrestore(&priv->cmd_lock, flags);

        fd = priv->fd;

        fd->done(fd, priv->comp_status);
out:
        qla2x00_rel_sp(sp);
}

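/* Deferred work: drop the final LS srb reference in process context. */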
static void qla_nvme_ls_complete(struct work_struct *work)
{
        struct nvme_private *priv =
                container_of(work, struct nvme_private, ls_work);

        kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
}

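/* srb->done callback for LS requests; defers completion to a work item. */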
static void qla_nvme_sp_ls_done(srb_t *sp, int res)
{
        struct nvme_private *priv = sp->priv;

        if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
                return;

        if (res)
                res = -EINVAL;

        priv->comp_status = res;
        INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
        schedule_work(&priv->ls_work);
}

/* It is assumed that the qpair lock is held. */
static void qla_nvme_sp_done(srb_t *sp, int res)
{
        struct nvme_private *priv = sp->priv;

        priv->comp_status = res;
        kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);

        return;
}

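/*
 * Work item that asks the firmware to abort an outstanding NVMe srb via
 * ha->isp_ops->abort_command(), then drops the reference that was taken
 * when the abort was scheduled.
 */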
static void qla_nvme_abort_work(struct work_struct *work)
{
        struct nvme_private *priv =
                container_of(work, struct nvme_private, abort_work);
        srb_t *sp = priv->sp;
        fc_port_t *fcport = sp->fcport;
        struct qla_hw_data *ha = fcport->vha->hw;
        int rval;

        ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
               "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
               __func__, sp, sp->handle, fcport, fcport->deleted);

        if (!ha->flags.fw_started && fcport->deleted)
                goto out;

        if (ha->flags.host_shutting_down) {
                ql_log(ql_log_info, sp->fcport->vha, 0xffff,
                    "%s Calling done on sp: %p, type: 0x%x\n",
                    __func__, sp, sp->type);
                sp->done(sp, 0);
                goto out;
        }

        rval = ha->isp_ops->abort_command(sp);

        ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
            "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
            __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
            sp, sp->handle, fcport, rval);

out:
        /* kref_get was done before the work was scheduled. */
        kref_put(&sp->cmd_kref, sp->put_fn);
}

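/*
 * FC-NVMe transport .ls_abort entry point: take a reference on the
 * outstanding LS srb, if it is still present, and schedule the abort work.
 */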
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
        struct nvme_private *priv = fd->private;
        unsigned long flags;

        spin_lock_irqsave(&priv->cmd_lock, flags);
        if (!priv->sp) {
                spin_unlock_irqrestore(&priv->cmd_lock, flags);
                return;
        }

        if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
                spin_unlock_irqrestore(&priv->cmd_lock, flags);
                return;
        }
        spin_unlock_irqrestore(&priv->cmd_lock, flags);

        INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
        schedule_work(&priv->abort_work);
}

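/*
 * FC-NVMe transport .ls_req entry point: build an SRB_NVME_LS srb for the
 * LS payload described by @fd and hand it to the firmware through
 * qla2x00_start_sp().
 */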
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
        struct qla_nvme_rport *qla_rport = rport->private;
        fc_port_t *fcport = qla_rport->fcport;
        struct srb_iocb   *nvme;
        struct nvme_private *priv = fd->private;
        struct scsi_qla_host *vha;
        int     rval = QLA_FUNCTION_FAILED;
        struct qla_hw_data *ha;
        srb_t           *sp;

        if (!fcport || fcport->deleted)
                return rval;

        vha = fcport->vha;
        ha = vha->hw;

        if (!ha->flags.fw_started)
                return rval;

        /* Alloc SRB structure */
        sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
        if (!sp)
                return rval;

        sp->type = SRB_NVME_LS;
        sp->name = "nvme_ls";
        sp->done = qla_nvme_sp_ls_done;
        sp->put_fn = qla_nvme_release_ls_cmd_kref;
        sp->priv = priv;
        priv->sp = sp;
        kref_init(&sp->cmd_kref);
        spin_lock_init(&priv->cmd_lock);
        nvme = &sp->u.iocb_cmd;
        priv->fd = fd;
        nvme->u.nvme.desc = fd;
        nvme->u.nvme.dir = 0;
        nvme->u.nvme.dl = 0;
        nvme->u.nvme.cmd_len = fd->rqstlen;
        nvme->u.nvme.rsp_len = fd->rsplen;
        nvme->u.nvme.rsp_dma = fd->rspdma;
        nvme->u.nvme.timeout_sec = fd->timeout;
        nvme->u.nvme.cmd_dma = fd->rqstdma;
        dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
            fd->rqstlen, DMA_TO_DEVICE);

        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x700e,
                    "qla2x00_start_sp failed = %d\n", rval);
                sp->priv = NULL;
                priv->sp = NULL;
                qla2x00_rel_sp(sp);
                return rval;
        }

        return rval;
}

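/*
 * FC-NVMe transport .fcp_abort entry point: mirror of qla_nvme_ls_abort()
 * for FCP commands; schedules qla_nvme_abort_work() if the srb is still
 * outstanding.
 */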
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
        struct nvme_private *priv = fd->private;
        unsigned long flags;

        spin_lock_irqsave(&priv->cmd_lock, flags);
        if (!priv->sp) {
                spin_unlock_irqrestore(&priv->cmd_lock, flags);
                return;
        }
        if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
                spin_unlock_irqrestore(&priv->cmd_lock, flags);
                return;
        }
        spin_unlock_irqrestore(&priv->cmd_lock, flags);

        INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
        schedule_work(&priv->abort_work);
}

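/*
 * Build and ring a Command Type NVME IOCB (plus any Continuation Type 1
 * IOCBs needed for additional data segments) on the srb's qpair request
 * queue. Called with no locks held; takes the qpair lock internally.
 */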
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
        unsigned long   flags;
        uint32_t        *clr_ptr;
        uint32_t        handle;
        struct cmd_nvme *cmd_pkt;
        uint16_t        cnt, i;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        uint16_t        avail_dsds;
        struct dsd64    *cur_dsd;
        struct req_que *req = NULL;
        struct scsi_qla_host *vha = sp->fcport->vha;
        struct qla_hw_data *ha = vha->hw;
        struct qla_qpair *qpair = sp->qpair;
        struct srb_iocb *nvme = &sp->u.iocb_cmd;
        struct scatterlist *sgl, *sg;
        struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
        struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
        uint32_t        rval = QLA_SUCCESS;

        /* Setup qpair pointers */
        req = qpair->req;
        tot_dsds = fd->sg_cnt;

        /* Acquire qpair specific lock */
        spin_lock_irqsave(&qpair->qp_lock, flags);

        handle = qla2xxx_get_next_handle(req);
        if (handle == 0) {
                rval = -EBUSY;
                goto queuing_error;
        }
        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                if (IS_SHADOW_REG_CAPABLE(ha)) {
                        cnt = *req->out_ptr;
                } else {
                        cnt = rd_reg_dword_relaxed(req->req_q_out);
                        if (qla2x00_check_reg16_for_disconnect(vha, cnt))
                                goto queuing_error;
                }

                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length - (req->ring_index - cnt);

                if (req->cnt < (req_cnt + 2)) {
                        rval = -EBUSY;
                        goto queuing_error;
                }
        }

        if (unlikely(!fd->sqid)) {
                if (cmd->sqe.common.opcode == nvme_admin_async_event) {
                        nvme->u.nvme.aen_op = 1;
                        atomic_inc(&ha->nvme_active_aen_cnt);
                }
        }

        /* Build command packet. */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        req->cnt -= req_cnt;

        cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
        cmd_pkt->handle = make_handle(req->id, handle);

        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

        cmd_pkt->entry_status = 0;

        /* Update entry type to indicate Command NVME IOCB */
        cmd_pkt->entry_type = COMMAND_NVME;

        /* No data transfer - how do we check buffer len == 0? */
        if (fd->io_dir == NVMEFC_FCP_READ) {
                cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
                qpair->counters.input_bytes += fd->payload_length;
                qpair->counters.input_requests++;
        } else if (fd->io_dir == NVMEFC_FCP_WRITE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
                if ((vha->flags.nvme_first_burst) &&
                    (sp->fcport->nvme_prli_service_param &
                        NVME_PRLI_SP_FIRST_BURST)) {
                        if ((fd->payload_length <=
                            sp->fcport->nvme_first_burst_size) ||
                                (sp->fcport->nvme_first_burst_size == 0))
                                cmd_pkt->control_flags |=
                                        cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
                }
                qpair->counters.output_bytes += fd->payload_length;
                qpair->counters.output_requests++;
        } else if (fd->io_dir == 0) {
                cmd_pkt->control_flags = 0;
        }
        /* Set BIT_13 of control flags for Async event */
        if (vha->flags.nvme2_enabled &&
            cmd->sqe.common.opcode == nvme_admin_async_event) {
                cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT);
        }

        /* Set NPORT-ID */
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
        cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

        /* NVME RSP IU */
        cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
        put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);

        /* NVME CMND IU */
        cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
        cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);

        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
        cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

        /* One DSD is available in the Command Type NVME IOCB */
        avail_dsds = 1;
        cur_dsd = &cmd_pkt->nvme_dsd;
        sgl = fd->first_sgl;

        /* Load data segments */
        for_each_sg(sgl, sg, tot_dsds, i) {
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */

                        /* Adjust ring index */
                        req->ring_index++;
                        if (req->ring_index == req->length) {
                                req->ring_index = 0;
                                req->ring_ptr = req->ring;
                        } else {
                                req->ring_ptr++;
                        }
                        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
                        put_unaligned_le32(CONTINUE_A64_TYPE,
                                           &cont_pkt->entry_type);

                        cur_dsd = cont_pkt->dsd;
                        avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
                }

                append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }

        /* Set total entry count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        /* Set chip new ring index. */
        wrt_reg_dword(req->req_q_in, req->ring_index);

queuing_error:
        spin_unlock_irqrestore(&qpair->qp_lock, flags);

        return rval;
}

/* Post a command */
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
        fc_port_t *fcport;
        struct srb_iocb *nvme;
        struct scsi_qla_host *vha;
        int rval;
        srb_t *sp;
        struct qla_qpair *qpair = hw_queue_handle;
        struct nvme_private *priv = fd->private;
        struct qla_nvme_rport *qla_rport = rport->private;

        if (!priv) {
                /* nvme association has been torn down */
                return -ENODEV;
        }

        fcport = qla_rport->fcport;

        if (!qpair || !fcport)
                return -ENODEV;

        if (!qpair->fw_started || fcport->deleted)
                return -EBUSY;

        vha = fcport->vha;

        if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
                return -ENODEV;

        if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
            (qpair && !qpair->fw_started) || fcport->deleted)
                return -EBUSY;

        /*
         * If we know the dev is going away while the transport is still
         * sending IOs, return busy back to stall the IO queue. This happens
         * when the link goes away and fw hasn't notified us yet, but IOs are
         * being returned. If the dev comes back quickly we won't exhaust the
         * IO retry count at the core.
         */
        if (fcport->nvme_flag & NVME_FLAG_RESETTING)
                return -EBUSY;

        /* Alloc SRB structure */
        sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
        if (!sp)
                return -EBUSY;

        kref_init(&sp->cmd_kref);
        spin_lock_init(&priv->cmd_lock);
        sp->priv = priv;
        priv->sp = sp;
        sp->type = SRB_NVME_CMD;
        sp->name = "nvme_cmd";
        sp->done = qla_nvme_sp_done;
        sp->put_fn = qla_nvme_release_fcp_cmd_kref;
        sp->qpair = qpair;
        sp->vha = vha;
        nvme = &sp->u.iocb_cmd;
        nvme->u.nvme.desc = fd;

        rval = qla2x00_start_nvme_mq(sp);
        if (rval != QLA_SUCCESS) {
                ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x212d,
                    "qla2x00_start_nvme_mq failed = %d\n", rval);
                sp->priv = NULL;
                priv->sp = NULL;
                qla2xxx_rel_qpair_sp(sp->qpair, sp);
        }

        return rval;
}

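/*
 * Transport .localport_delete callback: clear the cached local port and
 * wake up the waiter in qla_nvme_delete().
 */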
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
        struct scsi_qla_host *vha = lport->private;

        ql_log(ql_log_info, vha, 0x210f,
            "localport delete of %p completed.\n", vha->nvme_local_port);
        vha->nvme_local_port = NULL;
        complete(&vha->nvme_del_done);
}

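/*
 * Transport .remoteport_delete callback: clear the fcport's remote port
 * state and wake up the waiter in qla_nvme_unregister_remote_port().
 */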
static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
        fc_port_t *fcport;
        struct qla_nvme_rport *qla_rport = rport->private;

        fcport = qla_rport->fcport;
        fcport->nvme_remote_port = NULL;
        fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
        fcport->nvme_flag &= ~NVME_FLAG_DELETING;
        ql_log(ql_log_info, fcport->vha, 0x2110,
            "remoteport_delete of %p %8phN completed.\n",
            fcport, fcport->port_name);
        complete(&fcport->nvme_del_done);
}

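/* Operations and limits advertised to the FC-NVMe transport. */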
static struct nvme_fc_port_template qla_nvme_fc_transport = {
        .localport_delete = qla_nvme_localport_delete,
        .remoteport_delete = qla_nvme_remoteport_delete,
        .create_queue   = qla_nvme_alloc_queue,
        .delete_queue   = NULL,
        .ls_req         = qla_nvme_ls_req,
        .ls_abort       = qla_nvme_ls_abort,
        .fcp_io         = qla_nvme_post_cmd,
        .fcp_abort      = qla_nvme_fcp_abort,
        .max_hw_queues  = 8,
        .max_sgl_segments = 1024,
        .max_dif_sgl_segments = 64,
        .dma_boundary = 0xFFFFFFFF,
        .local_priv_sz  = 8,
        .remote_priv_sz = sizeof(struct qla_nvme_rport),
        .lsrqst_priv_sz = sizeof(struct nvme_private),
        .fcprqst_priv_sz = sizeof(struct nvme_private),
};

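/*
 * Unregister an fcport's NVMe remote port and wait for the transport to
 * finish tearing it down. Dev-loss is forced to zero when the driver is
 * being removed so that the teardown does not linger.
 */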
void qla_nvme_unregister_remote_port(struct fc_port *fcport)
{
        int ret;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return;

        ql_log(ql_log_warn, NULL, 0x2112,
            "%s: unregister remoteport on %p %8phN\n",
            __func__, fcport, fcport->port_name);

        if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
                nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);

        init_completion(&fcport->nvme_del_done);
        ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
        if (ret)
                ql_log(ql_log_info, fcport->vha, 0x2114,
                        "%s: Failed to unregister nvme_remote_port (%d)\n",
                            __func__, ret);
        wait_for_completion(&fcport->nvme_del_done);
}

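/*
 * Unregister the host's NVMe local port, if any, and wait for the
 * transport to drop it.
 */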
void qla_nvme_delete(struct scsi_qla_host *vha)
{
        int nv_ret;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return;

        if (vha->nvme_local_port) {
                init_completion(&vha->nvme_del_done);
                ql_log(ql_log_info, vha, 0x2116,
                        "unregister localport=%p\n",
                        vha->nvme_local_port);
                nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
                if (nv_ret)
                        ql_log(ql_log_info, vha, 0x2115,
                            "Unregister of localport failed\n");
                else
                        wait_for_completion(&vha->nvme_del_done);
        }
}

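/*
 * Register this host as an FC-NVMe local port. Clamps max_hw_queues to
 * the number of queue pairs the HBA actually supports before calling
 * nvme_fc_register_localport().
 */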
int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
        struct nvme_fc_port_template *tmpl;
        struct qla_hw_data *ha;
        struct nvme_fc_port_info pinfo;
        int ret = -EINVAL;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return ret;

        ha = vha->hw;
        tmpl = &qla_nvme_fc_transport;

        WARN_ON(vha->nvme_local_port);

        qla_nvme_fc_transport.max_hw_queues =
            min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
                (uint8_t)(ha->max_qpairs ? ha->max_qpairs : 1));

        pinfo.node_name = wwn_to_u64(vha->node_name);
        pinfo.port_name = wwn_to_u64(vha->port_name);
        pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
        pinfo.port_id = vha->d_id.b24;

        ql_log(ql_log_info, vha, 0xffff,
            "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
            pinfo.node_name, pinfo.port_name, pinfo.port_id);
        qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

        ret = nvme_fc_register_localport(&pinfo, tmpl,
            get_device(&ha->pdev->dev), &vha->nvme_local_port);
        if (ret) {
                ql_log(ql_log_warn, vha, 0xffff,
                    "register_localport failed: ret=%x\n", ret);
        } else {
                vha->nvme_local_port->private = vha;
        }

        return ret;
}