// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2017 QLogic Corporation
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

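/*
 * Register a remote port with the FC-NVMe transport once PRLI has
 * reported an NVMe target or discovery service on it.
 */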
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
        struct qla_nvme_rport *rport;
        struct nvme_fc_port_info req;
        int ret;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return 0;

        if (!vha->flags.nvme_enabled) {
                ql_log(ql_log_info, vha, 0x2100,
                    "%s: Not registering target since Host NVME is not enabled\n",
                    __func__);
                return 0;
        }

        if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
                return 0;

        if (!(fcport->nvme_prli_service_param &
            (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
                (fcport->nvme_flag & NVME_FLAG_REGISTERED))
                return 0;

        if (atomic_read(&fcport->state) == FCS_ONLINE)
                return 0;

        qla2x00_set_fcport_state(fcport, FCS_ONLINE);

        fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

        memset(&req, 0, sizeof(struct nvme_fc_port_info));
        req.port_name = wwn_to_u64(fcport->port_name);
        req.node_name = wwn_to_u64(fcport->node_name);
        req.port_role = 0;
        req.dev_loss_tmo = 0;

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
                req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
                req.port_role |= FC_PORT_ROLE_NVME_TARGET;

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
                req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

        req.port_id = fcport->d_id.b24;

        ql_log(ql_log_info, vha, 0x2102,
            "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
            __func__, req.node_name, req.port_name,
            req.port_id);

        ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
            &fcport->nvme_remote_port);
        if (ret) {
                ql_log(ql_log_warn, vha, 0x212e,
                    "Failed to register remote port. Transport returned %d\n",
                    ret);
                return ret;
        }

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
                ql_log(ql_log_info, vha, 0x212a,
                       "PortID:%06x Supports SLER\n", req.port_id);

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL)
                ql_log(ql_log_info, vha, 0x212b,
                       "PortID:%06x Supports PI control\n", req.port_id);

        rport = fcport->nvme_remote_port->private;
        rport->fcport = fcport;

        fcport->nvme_flag |= NVME_FLAG_REGISTERED;
        return 0;
}

/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
        struct scsi_qla_host *vha;
        struct qla_hw_data *ha;
        struct qla_qpair *qpair;

        /* Map admin queue and 1st IO queue to index 0 */
        if (qidx)
                qidx--;

        vha = (struct scsi_qla_host *)lport->private;
        ha = vha->hw;

        ql_log(ql_log_info, vha, 0x2104,
            "%s: handle %p, idx =%d, qsize %d\n",
            __func__, handle, qidx, qsize);

        if (qidx > qla_nvme_fc_transport.max_hw_queues) {
                ql_log(ql_log_warn, vha, 0x212f,
                    "%s: Illegal qidx=%d. Max=%d\n",
                    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
                return -EINVAL;
        }

        /* Use base qpair if max_qpairs is 0 */
        if (!ha->max_qpairs) {
                qpair = ha->base_qpair;
        } else {
                if (ha->queue_pair_map[qidx]) {
                        *handle = ha->queue_pair_map[qidx];
                        ql_log(ql_log_info, vha, 0x2121,
                               "Returning existing qpair of %p for idx=%x\n",
                               *handle, qidx);
                        return 0;
                }

                qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
                if (!qpair) {
                        ql_log(ql_log_warn, vha, 0x2122,
                               "Failed to allocate qpair\n");
                        return -EINVAL;
                }
        }
        *handle = qpair;

        return 0;
}

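/*
 * Final kref release for an NVMe FCP command: report the completion
 * status to the transport and return the srb to its qpair.
 */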
static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
{
        struct srb *sp = container_of(kref, struct srb, cmd_kref);
        struct nvme_private *priv = (struct nvme_private *)sp->priv;
        struct nvmefc_fcp_req *fd;
        struct srb_iocb *nvme;
        unsigned long flags;

        if (!priv)
                goto out;

        nvme = &sp->u.iocb_cmd;
        fd = nvme->u.nvme.desc;

        spin_lock_irqsave(&priv->cmd_lock, flags);
        priv->sp = NULL;
        sp->priv = NULL;
        if (priv->comp_status == QLA_SUCCESS) {
                fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
                fd->status = NVME_SC_SUCCESS;
        } else {
                fd->rcv_rsplen = 0;
                fd->transferred_length = 0;
                fd->status = NVME_SC_INTERNAL;
        }
        spin_unlock_irqrestore(&priv->cmd_lock, flags);

        fd->done(fd);
out:
        qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

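/* Undo the LS request DMA mapping set up in qla_nvme_ls_req(). */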
static void qla_nvme_ls_unmap(struct srb *sp, struct nvmefc_ls_req *fd)
{
        if (sp->flags & SRB_DMA_VALID) {
                struct srb_iocb *nvme = &sp->u.iocb_cmd;
                struct qla_hw_data *ha = sp->fcport->vha->hw;

                dma_unmap_single(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
                                 fd->rqstlen, DMA_TO_DEVICE);
                sp->flags &= ~SRB_DMA_VALID;
        }
}

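/*
 * Final kref release for an LS request: unmap the request buffer and
 * complete it back to the transport.
 */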
static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
        struct srb *sp = container_of(kref, struct srb, cmd_kref);
        struct nvme_private *priv = (struct nvme_private *)sp->priv;
        struct nvmefc_ls_req *fd;
        unsigned long flags;

        if (!priv)
                goto out;

        spin_lock_irqsave(&priv->cmd_lock, flags);
        priv->sp = NULL;
        sp->priv = NULL;
        spin_unlock_irqrestore(&priv->cmd_lock, flags);

        fd = priv->fd;

        qla_nvme_ls_unmap(sp, fd);
        fd->done(fd, priv->comp_status);
out:
        qla2x00_rel_sp(sp);
}

static void qla_nvme_ls_complete(struct work_struct *work)
{
        struct nvme_private *priv =
                container_of(work, struct nvme_private, ls_work);

        kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
}

static void qla_nvme_sp_ls_done(srb_t *sp, int res)
{
        struct nvme_private *priv = sp->priv;

        if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
                return;

        if (res)
                res = -EINVAL;

        priv->comp_status = res;
        INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
        schedule_work(&priv->ls_work);
}

/* It is assumed that the qpair lock is held. */
static void qla_nvme_sp_done(srb_t *sp, int res)
{
        struct nvme_private *priv = sp->priv;

        priv->comp_status = res;
        kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
}

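/*
 * Deferred abort handler: issue an ABTS for the srb cached in
 * nvme_private and drop the reference taken when the work was queued.
 */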
static void qla_nvme_abort_work(struct work_struct *work)
{
        struct nvme_private *priv =
                container_of(work, struct nvme_private, abort_work);
        srb_t *sp = priv->sp;
        fc_port_t *fcport = sp->fcport;
        struct qla_hw_data *ha = fcport->vha->hw;
        int rval, abts_done_called = 1;
        bool io_wait_for_abort_done;
        uint32_t handle;

        ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
               "%s called for sp=%p, hndl=%x on fcport=%p desc=%p deleted=%d\n",
               __func__, sp, sp->handle, fcport, sp->u.iocb_cmd.u.nvme.desc, fcport->deleted);

        if (!ha->flags.fw_started || fcport->deleted == QLA_SESS_DELETED)
                goto out;

        if (ha->flags.host_shutting_down) {
                ql_log(ql_log_info, sp->fcport->vha, 0xffff,
                    "%s Calling done on sp: %p, type: 0x%x\n",
                    __func__, sp, sp->type);
                sp->done(sp, 0);
                goto out;
        }

        /*
         * sp may not be valid after abort_command() if the return code is
         * either QLA_SUCCESS or QLA_ERR_FROM_FW, so cache these values here.
         */
        io_wait_for_abort_done = ql2xabts_wait_nvme &&
                                        QLA_ABTS_WAIT_ENABLED(sp);
        handle = sp->handle;

        rval = ha->isp_ops->abort_command(sp);

        ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
            "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
            __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
            sp, handle, fcport, rval);

        /*
         * If async tmf is enabled, the abort callback is called only on
         * return codes QLA_SUCCESS and QLA_ERR_FROM_FW.
         */
        if (ql2xasynctmfenable &&
            rval != QLA_SUCCESS && rval != QLA_ERR_FROM_FW)
                abts_done_called = 0;

        /*
         * Return before dropping the kref so that I/O requests wait
         * until the ABTS completes. That kref is dropped in
         * qla24xx_abort_sp_done().
         */
        if (abts_done_called && io_wait_for_abort_done)
                return;
out:
        /* kref_get was done before the work was scheduled. */
        kref_put(&sp->cmd_kref, sp->put_fn);
}

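/*
 * Transport entry point to abort an outstanding LS request; grabs a
 * reference on the srb and defers the actual abort to a workqueue.
 */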
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
        struct nvme_private *priv = fd->private;
        unsigned long flags;

        spin_lock_irqsave(&priv->cmd_lock, flags);
        if (!priv->sp) {
                spin_unlock_irqrestore(&priv->cmd_lock, flags);
                return;
        }

        if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
                spin_unlock_irqrestore(&priv->cmd_lock, flags);
                return;
        }
        spin_unlock_irqrestore(&priv->cmd_lock, flags);

        INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
        schedule_work(&priv->abort_work);
}

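/* Transport entry point to send an NVMe LS request to a remote port. */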
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
        struct qla_nvme_rport *qla_rport = rport->private;
        fc_port_t *fcport = qla_rport->fcport;
        struct srb_iocb   *nvme;
        struct nvme_private *priv = fd->private;
        struct scsi_qla_host *vha;
        int     rval = QLA_FUNCTION_FAILED;
        struct qla_hw_data *ha;
        srb_t           *sp;

        if (!fcport || fcport->deleted)
                return rval;

        vha = fcport->vha;
        ha = vha->hw;

        if (!ha->flags.fw_started)
                return rval;

        /* Alloc SRB structure */
        sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
        if (!sp)
                return rval;

        sp->type = SRB_NVME_LS;
        sp->name = "nvme_ls";
        sp->done = qla_nvme_sp_ls_done;
        sp->put_fn = qla_nvme_release_ls_cmd_kref;
        sp->priv = priv;
        priv->sp = sp;
        kref_init(&sp->cmd_kref);
        spin_lock_init(&priv->cmd_lock);
        nvme = &sp->u.iocb_cmd;
        priv->fd = fd;
        nvme->u.nvme.desc = fd;
        nvme->u.nvme.dir = 0;
        nvme->u.nvme.dl = 0;
        nvme->u.nvme.cmd_len = fd->rqstlen;
        nvme->u.nvme.rsp_len = fd->rsplen;
        nvme->u.nvme.rsp_dma = fd->rspdma;
        nvme->u.nvme.timeout_sec = fd->timeout;
        nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
            fd->rqstlen, DMA_TO_DEVICE);
        dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
            fd->rqstlen, DMA_TO_DEVICE);

        sp->flags |= SRB_DMA_VALID;

        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x700e,
                    "qla2x00_start_sp failed = %d\n", rval);
                wake_up(&sp->nvme_ls_waitq);
                sp->priv = NULL;
                priv->sp = NULL;
                qla_nvme_ls_unmap(sp, fd);
                qla2x00_rel_sp(sp);
                return rval;
        }

        return rval;
}

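/*
 * Transport entry point to abort an outstanding FCP command; like LS
 * abort, the work is deferred to qla_nvme_abort_work().
 */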
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
        struct nvme_private *priv = fd->private;
        unsigned long flags;

        spin_lock_irqsave(&priv->cmd_lock, flags);
        if (!priv->sp) {
                spin_unlock_irqrestore(&priv->cmd_lock, flags);
                return;
        }
        if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
                spin_unlock_irqrestore(&priv->cmd_lock, flags);
                return;
        }
        spin_unlock_irqrestore(&priv->cmd_lock, flags);

        INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
        schedule_work(&priv->abort_work);
}

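/*
 * Build a Command Type NVME IOCB (plus any Continuation Type 1 IOCBs
 * needed for extra data segments) on the qpair request ring and notify
 * the chip of the new ring index. Takes the qpair lock itself.
 */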
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
        unsigned long   flags;
        uint32_t        *clr_ptr;
        uint32_t        handle;
        struct cmd_nvme *cmd_pkt;
        uint16_t        cnt, i;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        uint16_t        avail_dsds;
        struct dsd64    *cur_dsd;
        struct req_que *req = NULL;
        struct scsi_qla_host *vha = sp->fcport->vha;
        struct qla_hw_data *ha = vha->hw;
        struct qla_qpair *qpair = sp->qpair;
        struct srb_iocb *nvme = &sp->u.iocb_cmd;
        struct scatterlist *sgl, *sg;
        struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
        struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
        uint32_t        rval = QLA_SUCCESS;

        /* Setup qpair pointers */
        req = qpair->req;
        tot_dsds = fd->sg_cnt;

        /* Acquire qpair specific lock */
        spin_lock_irqsave(&qpair->qp_lock, flags);

        handle = qla2xxx_get_next_handle(req);
        if (handle == 0) {
                rval = -EBUSY;
                goto queuing_error;
        }
        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                if (IS_SHADOW_REG_CAPABLE(ha)) {
                        cnt = *req->out_ptr;
                } else {
                        cnt = rd_reg_dword_relaxed(req->req_q_out);
                        if (qla2x00_check_reg16_for_disconnect(vha, cnt))
                                goto queuing_error;
                }

                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length - (req->ring_index - cnt);

                if (req->cnt < (req_cnt + 2)) {
                        rval = -EBUSY;
                        goto queuing_error;
                }
        }

        if (unlikely(!fd->sqid)) {
                if (cmd->sqe.common.opcode == nvme_admin_async_event) {
                        nvme->u.nvme.aen_op = 1;
                        atomic_inc(&ha->nvme_active_aen_cnt);
                }
        }

        /* Build command packet. */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        req->cnt -= req_cnt;

        cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
        cmd_pkt->handle = make_handle(req->id, handle);

        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

        cmd_pkt->entry_status = 0;

        /* Update entry type to indicate Command NVME IOCB */
        cmd_pkt->entry_type = COMMAND_NVME;

        /* No data transfer; how do we check buffer len == 0? */
        if (fd->io_dir == NVMEFC_FCP_READ) {
                cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
                qpair->counters.input_bytes += fd->payload_length;
                qpair->counters.input_requests++;
        } else if (fd->io_dir == NVMEFC_FCP_WRITE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
                if ((vha->flags.nvme_first_burst) &&
                    (sp->fcport->nvme_prli_service_param &
                        NVME_PRLI_SP_FIRST_BURST)) {
                        if ((fd->payload_length <=
                            sp->fcport->nvme_first_burst_size) ||
                                (sp->fcport->nvme_first_burst_size == 0))
                                cmd_pkt->control_flags |=
                                        cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
                }
                qpair->counters.output_bytes += fd->payload_length;
                qpair->counters.output_requests++;
        } else if (fd->io_dir == 0) {
                cmd_pkt->control_flags = 0;
        }

        if (sp->fcport->edif.enable && fd->io_dir != 0)
                cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);

        /* Set BIT_13 of control flags for Async event */
        if (vha->flags.nvme2_enabled &&
            cmd->sqe.common.opcode == nvme_admin_async_event) {
                cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT);
        }

        /* Set NPORT-ID */
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
        cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

        /* NVME RSP IU */
        cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
        put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);

        /* NVME CMND IU */
        cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
        cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);

        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
        cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

        /* One DSD is available in the Command Type NVME IOCB */
        avail_dsds = 1;
        cur_dsd = &cmd_pkt->nvme_dsd;
        sgl = fd->first_sgl;

        /* Load data segments */
        for_each_sg(sgl, sg, tot_dsds, i) {
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */

                        /* Adjust ring index */
                        req->ring_index++;
                        if (req->ring_index == req->length) {
                                req->ring_index = 0;
                                req->ring_ptr = req->ring;
                        } else {
                                req->ring_ptr++;
                        }
                        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
                        put_unaligned_le32(CONTINUE_A64_TYPE,
                                           &cont_pkt->entry_type);

                        cur_dsd = cont_pkt->dsd;
                        avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
                }

                append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }

        /* Set total entry count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        /* ignore nvme async cmd due to long timeout */
        if (!nvme->u.nvme.aen_op)
                sp->qpair->cmd_cnt++;

        /* Set chip new ring index. */
        wrt_reg_dword(req->req_q_in, req->ring_index);

queuing_error:
        spin_unlock_irqrestore(&qpair->qp_lock, flags);

        return rval;
}

/* Post an NVMe FCP command on the queue pair bound to this hw queue. */
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
        fc_port_t *fcport;
        struct srb_iocb *nvme;
        struct scsi_qla_host *vha;
        int rval;
        srb_t *sp;
        struct qla_qpair *qpair = hw_queue_handle;
        struct nvme_private *priv = fd->private;
        struct qla_nvme_rport *qla_rport = rport->private;

        if (!priv) {
                /* nvme association has been torn down */
                return -ENODEV;
        }

        fcport = qla_rport->fcport;

        if (unlikely(!qpair || !fcport || fcport->deleted))
                return -EBUSY;

        if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
                return -ENODEV;

        vha = fcport->vha;

        if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
                return -EBUSY;

        /*
         * If we know the dev is going away while the transport is still
         * sending IO's, return busy back to stall the IO Q. This happens
         * when the link goes away and fw hasn't notified us yet, but IO's
         * are being returned. If the dev comes back quickly we won't
         * exhaust the IO retry count at the core.
         */
        if (fcport->nvme_flag & NVME_FLAG_RESETTING)
                return -EBUSY;

        /* Alloc SRB structure */
        sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
        if (!sp)
                return -EBUSY;

        init_waitqueue_head(&sp->nvme_ls_waitq);
        kref_init(&sp->cmd_kref);
        spin_lock_init(&priv->cmd_lock);
        sp->priv = priv;
        priv->sp = sp;
        sp->type = SRB_NVME_CMD;
        sp->name = "nvme_cmd";
        sp->done = qla_nvme_sp_done;
        sp->put_fn = qla_nvme_release_fcp_cmd_kref;
        sp->qpair = qpair;
        sp->vha = vha;
        sp->cmd_sp = sp;
        nvme = &sp->u.iocb_cmd;
        nvme->u.nvme.desc = fd;

        rval = qla2x00_start_nvme_mq(sp);
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x212d,
                    "qla2x00_start_nvme_mq failed = %d\n", rval);
                wake_up(&sp->nvme_ls_waitq);
                sp->priv = NULL;
                priv->sp = NULL;
                qla2xxx_rel_qpair_sp(sp->qpair, sp);
        }

        return rval;
}

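/* Transport callbacks invoked once local/remote port deletion completes. */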
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
        struct scsi_qla_host *vha = lport->private;

        ql_log(ql_log_info, vha, 0x210f,
            "localport delete of %p completed.\n", vha->nvme_local_port);
        vha->nvme_local_port = NULL;
        complete(&vha->nvme_del_done);
}

static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
        fc_port_t *fcport;
        struct qla_nvme_rport *qla_rport = rport->private;

        fcport = qla_rport->fcport;
        fcport->nvme_remote_port = NULL;
        fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
        fcport->nvme_flag &= ~NVME_FLAG_DELETING;
        ql_log(ql_log_info, fcport->vha, 0x2110,
            "remoteport_delete of %p %8phN completed.\n",
            fcport, fcport->port_name);
        complete(&fcport->nvme_del_done);
}

static struct nvme_fc_port_template qla_nvme_fc_transport = {
        .localport_delete = qla_nvme_localport_delete,
        .remoteport_delete = qla_nvme_remoteport_delete,
        .create_queue   = qla_nvme_alloc_queue,
        .delete_queue   = NULL,
        .ls_req         = qla_nvme_ls_req,
        .ls_abort       = qla_nvme_ls_abort,
        .fcp_io         = qla_nvme_post_cmd,
        .fcp_abort      = qla_nvme_fcp_abort,
        .max_hw_queues  = 8,
        .max_sgl_segments = 1024,
        .max_dif_sgl_segments = 64,
        .dma_boundary = 0xFFFFFFFF,
        .local_priv_sz  = 8,
        .remote_priv_sz = sizeof(struct qla_nvme_rport),
        .lsrqst_priv_sz = sizeof(struct nvme_private),
        .fcprqst_priv_sz = sizeof(struct nvme_private),
};

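/*
 * Unregister the remote port from the FC-NVMe transport and wait for
 * qla_nvme_remoteport_delete() to signal that teardown finished.
 */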
void qla_nvme_unregister_remote_port(struct fc_port *fcport)
{
        int ret;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return;

        ql_log(ql_log_warn, fcport->vha, 0x2112,
            "%s: unregister remoteport on %p %8phN\n",
            __func__, fcport, fcport->port_name);

        if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
                nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);

        init_completion(&fcport->nvme_del_done);
        ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
        if (ret)
                ql_log(ql_log_info, fcport->vha, 0x2114,
                    "%s: Failed to unregister nvme_remote_port (%d)\n",
                    __func__, ret);
        wait_for_completion(&fcport->nvme_del_done);
}

void qla_nvme_delete(struct scsi_qla_host *vha)
{
        int nv_ret;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return;

        if (vha->nvme_local_port) {
                init_completion(&vha->nvme_del_done);
                ql_log(ql_log_info, vha, 0x2116,
                    "unregister localport=%p\n",
                    vha->nvme_local_port);
                nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
                if (nv_ret)
                        ql_log(ql_log_info, vha, 0x2115,
                            "Unregister of localport failed\n");
                else
                        wait_for_completion(&vha->nvme_del_done);
        }
}

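/*
 * Register this host with the FC-NVMe transport as a local port,
 * sizing max_hw_queues to the number of queue pairs available.
 */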
int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
        struct nvme_fc_port_template *tmpl;
        struct qla_hw_data *ha;
        struct nvme_fc_port_info pinfo;
        int ret = -EINVAL;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return ret;

        ha = vha->hw;
        tmpl = &qla_nvme_fc_transport;

        qla_nvme_fc_transport.max_hw_queues =
            min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
                (uint8_t)(ha->max_qpairs ? ha->max_qpairs : 1));

        pinfo.node_name = wwn_to_u64(vha->node_name);
        pinfo.port_name = wwn_to_u64(vha->port_name);
        pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
        pinfo.port_id = vha->d_id.b24;

        mutex_lock(&ha->vport_lock);
        /*
         * Check again for nvme_local_port to see if any other thread raced
         * with this one and finished registration.
         */
        if (!vha->nvme_local_port) {
                ql_log(ql_log_info, vha, 0xffff,
                    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
                    pinfo.node_name, pinfo.port_name, pinfo.port_id);
                qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

                ret = nvme_fc_register_localport(&pinfo, tmpl,
                                                 get_device(&ha->pdev->dev),
                                                 &vha->nvme_local_port);
                mutex_unlock(&ha->vport_lock);
        } else {
                mutex_unlock(&ha->vport_lock);
                return 0;
        }
        if (ret) {
                ql_log(ql_log_warn, vha, 0xffff,
                    "register_localport failed: ret=%x\n", ret);
        } else {
                vha->nvme_local_port->private = vha;
        }

        return ret;
}

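/*
 * When ABTS-wait is enabled, ask the firmware to retry the ABTS and to
 * enforce a driver-specified response timeout.
 */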
void qla_nvme_abort_set_option(struct abort_entry_24xx *abt, srb_t *orig_sp)
{
        struct qla_hw_data *ha;

        if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
                return;

        ha = orig_sp->fcport->vha->hw;

        WARN_ON_ONCE(abt->options & cpu_to_le16(BIT_0));
        /* Use Driver Specified Retry Count */
        abt->options |= cpu_to_le16(AOF_ABTS_RTY_CNT);
        abt->drv.abts_rty_cnt = cpu_to_le16(2);
        /* Use specified response timeout */
        abt->options |= cpu_to_le16(AOF_RSP_TIMEOUT);
        /* set it to 2 * r_a_tov in secs */
        abt->drv.rsp_timeout = cpu_to_le16(2 * (ha->r_a_tov / 10));
}

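/* Log the completion status of an Abort I/O IOCB issued for NVMe. */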
void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt, srb_t *orig_sp)
{
        u16     comp_status;
        struct scsi_qla_host *vha;

        if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
                return;

        vha = orig_sp->fcport->vha;

        comp_status = le16_to_cpu(abt->comp_status);
        switch (comp_status) {
        case CS_RESET:          /* reset event aborted */
        case CS_ABORTED:        /* IOCB was cleaned */
        case CS_TIMEOUT:
        /* N_Port handle is not currently logged in */
        case CS_PORT_UNAVAILABLE:
        /* N_Port handle was logged out while waiting for ABTS to complete */
        case CS_PORT_LOGGED_OUT:
        /* Firmware found that the port name changed */
        case CS_PORT_CONFIG_CHG:
                ql_dbg(ql_dbg_async, vha, 0xf09d,
                       "Abort I/O IOCB completed with error, comp_status=%x\n",
                       comp_status);
                break;

        /* BA_RJT was received for the ABTS */
        case CS_REJECT_RECEIVED:
                ql_dbg(ql_dbg_async, vha, 0xf09e,
                       "BA_RJT was received for the ABTS rjt_vendorUnique = %u",
                       abt->fw.ba_rjt_vendorUnique);
                ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
                       "ba_rjt_reasonCodeExpl = %u, ba_rjt_reasonCode = %u\n",
                       abt->fw.ba_rjt_reasonCodeExpl, abt->fw.ba_rjt_reasonCode);
                break;

        case CS_COMPLETE:
                ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0xf09f,
                       "IOCB request is completed successfully comp_status=%x\n",
                       comp_status);
                break;

        case CS_IOCB_ERROR:
                ql_dbg(ql_dbg_async, vha, 0xf0a0,
                       "IOCB request is failed, comp_status=%x\n", comp_status);
                break;

        default:
                ql_dbg(ql_dbg_async, vha, 0xf0a1,
                       "Invalid Abort IO IOCB Completion Status %x\n",
                       comp_status);
                break;
        }
}

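/*
 * Drop the I/O reference held across an ABTS-wait so the command can
 * complete once the abort has finished.
 */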
inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp)
{
        if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
                return;
        kref_put(&orig_sp->cmd_kref, orig_sp->put_fn);
}