// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>
#include <linux/blk-mq-pci.h>
#include <linux/blk-mq.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

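/*
 * Register a remote FC port with the FC-NVMe transport once PRLI has
 * negotiated an NVMe service. The roles advertised in the PRLI service
 * parameters are translated into FC_PORT_ROLE_NVME_* flags before the
 * remote port is handed to nvme_fc_register_remoteport().
 */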
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct qla_nvme_rport *rport;
	struct nvme_fc_port_info req;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	if (!vha->flags.nvme_enabled) {
		ql_log(ql_log_info, vha, 0x2100,
		    "%s: Not registering target since Host NVME is not enabled\n",
		    __func__);
		return 0;
	}

	if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
		return 0;

	if (!(fcport->nvme_prli_service_param &
	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
	    (fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return 0;

	if (atomic_read(&fcport->state) == FCS_ONLINE)
		return 0;

	qla2x00_set_fcport_state(fcport, FCS_ONLINE);

	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

	memset(&req, 0, sizeof(struct nvme_fc_port_info));
	req.port_name = wwn_to_u64(fcport->port_name);
	req.node_name = wwn_to_u64(fcport->node_name);
	req.port_role = 0;
	req.dev_loss_tmo = fcport->dev_loss_tmo;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
		req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
		req.port_role |= FC_PORT_ROLE_NVME_TARGET;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
		req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	req.port_id = fcport->d_id.b24;

	ql_log(ql_log_info, vha, 0x2102,
	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
	    __func__, req.node_name, req.port_name,
	    req.port_id);

	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
	    &fcport->nvme_remote_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x212e,
		    "Failed to register remote port. Transport returned %d\n",
		    ret);
		return ret;
	}

	nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port,
				       fcport->dev_loss_tmo);

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
		ql_log(ql_log_info, vha, 0x212a,
		    "PortID:%06x Supports SLER\n", req.port_id);

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL)
		ql_log(ql_log_info, vha, 0x212b,
		    "PortID:%06x Supports PI control\n", req.port_id);

	rport = fcport->nvme_remote_port->private;
	rport->fcport = fcport;

	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
	return 0;
}

/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	/* Map admin queue and 1st IO queue to index 0 */
	if (qidx)
		qidx--;

	vha = (struct scsi_qla_host *)lport->private;
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0x2104,
	    "%s: handle %p, idx =%d, qsize %d\n",
	    __func__, handle, qidx, qsize);

	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
		ql_log(ql_log_warn, vha, 0x212f,
		    "%s: Illegal qidx=%d. Max=%d\n",
		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
		return -EINVAL;
	}

	/* Use base qpair if max_qpairs is 0 */
	if (!ha->max_qpairs) {
		qpair = ha->base_qpair;
	} else {
		if (ha->queue_pair_map[qidx]) {
			*handle = ha->queue_pair_map[qidx];
			ql_log(ql_log_info, vha, 0x2121,
			    "Returning existing qpair of %p for idx=%x\n",
			    *handle, qidx);
			return 0;
		}

		qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
		if (!qpair) {
			ql_log(ql_log_warn, vha, 0x2122,
			    "Failed to allocate qpair\n");
			return -EINVAL;
		}
	}
	*handle = qpair;

	return 0;
}

static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_fcp_req *fd;
	struct srb_iocb *nvme;
	unsigned long flags;

	if (!priv)
		goto out;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	if (priv->comp_status == QLA_SUCCESS) {
		fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
		fd->status = NVME_SC_SUCCESS;
	} else {
		fd->rcv_rsplen = 0;
		fd->transferred_length = 0;
		fd->status = NVME_SC_INTERNAL;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd->done(fd);
out:
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

static void qla_nvme_ls_unmap(struct srb *sp, struct nvmefc_ls_req *fd)
{
	if (sp->flags & SRB_DMA_VALID) {
		struct srb_iocb *nvme = &sp->u.iocb_cmd;
		struct qla_hw_data *ha = sp->fcport->vha->hw;

		dma_unmap_single(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
				 fd->rqstlen, DMA_TO_DEVICE);
		sp->flags &= ~SRB_DMA_VALID;
	}
}

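/*
 * Final kref release for an LS request SRB: unmap the request DMA buffer,
 * complete the request toward the transport and free the SRB.
 */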
static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_ls_req *fd;
	unsigned long flags;

	if (!priv)
		goto out;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd = priv->fd;

	qla_nvme_ls_unmap(sp, fd);
	fd->done(fd, priv->comp_status);
out:
	qla2x00_rel_sp(sp);
}

static void qla_nvme_ls_complete(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, ls_work);

	kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
}

static void qla_nvme_sp_ls_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
		return;

	if (res)
		res = -EINVAL;

	priv->comp_status = res;
	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
	schedule_work(&priv->ls_work);
}

/* It is assumed that the qpair lock is held. */
static void qla_nvme_sp_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	priv->comp_status = res;
	kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
}

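/*
 * Worker that issues the actual ABTS for an aborted NVMe command. Scheduled
 * from the ls_abort/fcp_abort entry points, which hold an extra kref on the
 * SRB so it cannot be freed underneath this work item.
 */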
static void qla_nvme_abort_work(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, abort_work);
	srb_t *sp = priv->sp;
	fc_port_t *fcport = sp->fcport;
	struct qla_hw_data *ha = fcport->vha->hw;
	int rval, abts_done_called = 1;
	bool io_wait_for_abort_done;
	uint32_t handle;

	ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
	       "%s called for sp=%p, hndl=%x on fcport=%p desc=%p deleted=%d\n",
	       __func__, sp, sp->handle, fcport, sp->u.iocb_cmd.u.nvme.desc, fcport->deleted);

	if (!ha->flags.fw_started || fcport->deleted == QLA_SESS_DELETED)
		goto out;

	if (ha->flags.host_shutting_down) {
		ql_log(ql_log_info, sp->fcport->vha, 0xffff,
		    "%s Calling done on sp: %p, type: 0x%x\n",
		    __func__, sp, sp->type);
		sp->done(sp, 0);
		goto out;
	}

	/*
	 * sp may not be valid after abort_command if return code is either
	 * SUCCESS or ERR_FROM_FW codes, so cache the value here.
	 */
	io_wait_for_abort_done = ql2xabts_wait_nvme &&
					QLA_ABTS_WAIT_ENABLED(sp);
	handle = sp->handle;

	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
	    "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
	    __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
	    sp, handle, fcport, rval);

	/*
	 * If async tmf is enabled, the abort callback is called only on
	 * return codes QLA_SUCCESS and QLA_ERR_FROM_FW.
	 */
	if (ql2xasynctmfenable &&
	    rval != QLA_SUCCESS && rval != QLA_ERR_FROM_FW)
		abts_done_called = 0;

	/*
	 * Returned before decreasing kref so that I/O requests
	 * are waited until ABTS complete. This kref is decreased
	 * at qla24xx_abort_sp_done function.
	 */
	if (abts_done_called && io_wait_for_abort_done)
		return;
out:
	/* kref_get was done before the work was scheduled. */
	kref_put(&sp->cmd_kref, sp->put_fn);
}

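/*
 * Transport entry point to abort an outstanding LS request. Takes a kref
 * on the SRB under priv->cmd_lock and defers the ABTS to a work item.
 */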
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}

	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

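/*
 * Transport entry point to issue an NVMe LS request. The request payload
 * is DMA-mapped here and unmapped when the command kref is released.
 */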
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct qla_nvme_rport *qla_rport = rport->private;
	fc_port_t *fcport = qla_rport->fcport;
	struct srb_iocb *nvme;
	struct nvme_private *priv = fd->private;
	struct scsi_qla_host *vha;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha;
	srb_t *sp;

	if (!fcport || fcport->deleted)
		return rval;

	vha = fcport->vha;
	ha = vha->hw;

	if (!ha->flags.fw_started)
		return rval;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		return rval;

	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_ls_done;
	sp->put_fn = qla_nvme_release_ls_cmd_kref;
	sp->priv = priv;
	priv->sp = sp;
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	nvme = &sp->u.iocb_cmd;
	priv->fd = fd;
	nvme->u.nvme.desc = fd;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.dl = 0;
	nvme->u.nvme.cmd_len = fd->rqstlen;
	nvme->u.nvme.rsp_len = fd->rsplen;
	nvme->u.nvme.rsp_dma = fd->rspdma;
	nvme->u.nvme.timeout_sec = fd->timeout;
	nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
	    fd->rqstlen, DMA_TO_DEVICE);
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
	    fd->rqstlen, DMA_TO_DEVICE);

	sp->flags |= SRB_DMA_VALID;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		wake_up(&sp->nvme_ls_waitq);
		sp->priv = NULL;
		priv->sp = NULL;
		qla_nvme_ls_unmap(sp, fd);
		qla2x00_rel_sp(sp);
		return rval;
	}

	return rval;
}

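/*
 * Transport entry point to abort an outstanding FCP command; mirrors
 * qla_nvme_ls_abort() but operates on an nvmefc_fcp_req.
 */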
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

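/*
 * Build and ring a Command Type NVME IOCB for the request described by
 * sp->u.iocb_cmd.u.nvme.desc, chaining Continuation Type 1 IOCBs when the
 * scatter/gather list needs more DSDs than the command IOCB carries.
 */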
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	struct cmd_nvme *cmd_pkt;
	uint16_t cnt, i;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;
	struct srb_iocb *nvme = &sp->u.iocb_cmd;
	struct scatterlist *sgl, *sg;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
	struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
	uint32_t rval = QLA_SUCCESS;

	/* Setup qpair pointers */
	req = qpair->req;
	rsp = qpair->rsp;
	tot_dsds = fd->sg_cnt;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0) {
		rval = -EBUSY;
		goto queuing_error;
	}
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		if (IS_SHADOW_REG_CAPABLE(ha)) {
			cnt = *req->out_ptr;
		} else {
			cnt = rd_reg_dword_relaxed(req->req_q_out);
			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
				goto queuing_error;
		}

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (req->cnt < (req_cnt + 2)) {
			rval = -EBUSY;
			goto queuing_error;
		}
	}

	if (unlikely(!fd->sqid)) {
		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
			nvme->u.nvme.aen_op = 1;
			atomic_inc(&ha->nvme_active_aen_cnt);
		}
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	cmd_pkt->entry_status = 0;

	/* Update entry type to indicate Command NVME IOCB */
	cmd_pkt->entry_type = COMMAND_NVME;

	/* No data transfer; how do we check buffer len == 0? */
	if (fd->io_dir == NVMEFC_FCP_READ) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		qpair->counters.input_bytes += fd->payload_length;
		qpair->counters.input_requests++;
	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		if ((vha->flags.nvme_first_burst) &&
		    (sp->fcport->nvme_prli_service_param &
		     NVME_PRLI_SP_FIRST_BURST)) {
			if ((fd->payload_length <=
			     sp->fcport->nvme_first_burst_size) ||
			    (sp->fcport->nvme_first_burst_size == 0))
				cmd_pkt->control_flags |=
					cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
		}
		qpair->counters.output_bytes += fd->payload_length;
		qpair->counters.output_requests++;
	} else if (fd->io_dir == 0) {
		cmd_pkt->control_flags = 0;
	}

	if (sp->fcport->edif.enable && fd->io_dir != 0)
		cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);

	/* Set BIT_13 of control flags for Async event */
	if (vha->flags.nvme2_enabled &&
	    cmd->sqe.common.opcode == nvme_admin_async_event) {
		cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT);
	}

	/* Set NPORT-ID */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* NVME RSP IU */
	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
	put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);

	/* NVME CNMD IU */
	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
	cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);

	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

	/* One DSD is available in the Command Type NVME IOCB */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->nvme_dsd;
	sgl = fd->first_sgl;

	/* Load data segments */
	for_each_sg(sgl, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */

			/* Adjust ring index */
			req->ring_index++;
			if (req->ring_index == req->length) {
				req->ring_index = 0;
				req->ring_ptr = req->ring;
			} else {
				req->ring_ptr++;
			}
			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
			put_unaligned_le32(CONTINUE_A64_TYPE,
					   &cont_pkt->entry_type);

			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}

	/* Set total entry count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	/* Ignore NVMe async cmd due to long timeout. */
	if (!nvme->u.nvme.aen_op)
		sp->qpair->cmd_cnt++;

	/* Set chip new ring index. */
	wrt_reg_dword(req->req_q_in, req->ring_index);

	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

queuing_error:
	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return rval;
}

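/*
 * Transport entry point for FCP I/O. Allocates an SRB against the qpair
 * selected by hw_queue_handle and queues the command to the firmware;
 * returns -EBUSY to make the transport retry while the port is unstable.
 */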
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	fc_port_t *fcport;
	struct srb_iocb *nvme;
	struct scsi_qla_host *vha;
	int rval;
	srb_t *sp;
	struct qla_qpair *qpair = hw_queue_handle;
	struct nvme_private *priv = fd->private;
	struct qla_nvme_rport *qla_rport = rport->private;

	if (!priv) {
		/* nvme association has been torn down */
		return -ENODEV;
	}

	fcport = qla_rport->fcport;

	if (unlikely(!qpair || !fcport || fcport->deleted))
		return -EBUSY;

	if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return -ENODEV;

	vha = fcport->vha;

	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		return -EBUSY;

	/*
	 * If we know the dev is going away while the transport is still sending
	 * IO's return busy back to stall the IO Q. This happens when the
	 * link goes away and fw hasn't notified us yet, but IO's are being
	 * returned. If the dev comes back quickly we won't exhaust the IO
	 * retry count at the core.
	 */
	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
		return -EBUSY;

	/* Alloc SRB structure */
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
	if (!sp)
		return -EBUSY;

	init_waitqueue_head(&sp->nvme_ls_waitq);
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	sp->priv = priv;
	priv->sp = sp;
	sp->type = SRB_NVME_CMD;
	sp->name = "nvme_cmd";
	sp->done = qla_nvme_sp_done;
	sp->put_fn = qla_nvme_release_fcp_cmd_kref;
	sp->qpair = qpair;
	sp->vha = vha;
	sp->cmd_sp = sp;
	nvme = &sp->u.iocb_cmd;
	nvme->u.nvme.desc = fd;

	rval = qla2x00_start_nvme_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x212d,
		    "qla2x00_start_nvme_mq failed = %d\n", rval);
		wake_up(&sp->nvme_ls_waitq);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2xxx_rel_qpair_sp(sp->qpair, sp);
	}

	return rval;
}

static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
    struct blk_mq_queue_map *map)
{
	struct scsi_qla_host *vha = lport->private;
	int rc;

	rc = blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
	if (rc)
		ql_log(ql_log_warn, vha, 0x21de,
		       "pci map queue failed 0x%x", rc);
}

static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
	struct scsi_qla_host *vha = lport->private;

	ql_log(ql_log_info, vha, 0x210f,
	    "localport delete of %p completed.\n", vha->nvme_local_port);
	vha->nvme_local_port = NULL;
	complete(&vha->nvme_del_done);
}

static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
	fc_port_t *fcport;
	struct qla_nvme_rport *qla_rport = rport->private;

	fcport = qla_rport->fcport;
	fcport->nvme_remote_port = NULL;
	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
	fcport->nvme_flag &= ~NVME_FLAG_DELETING;
	ql_log(ql_log_info, fcport->vha, 0x2110,
	    "remoteport_delete of %p %8phN completed.\n",
	    fcport, fcport->port_name);
	complete(&fcport->nvme_del_done);
}

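/*
 * FC-NVMe transport template. max_hw_queues and dma_boundary are adjusted
 * at registration time in qla_nvme_register_hba().
 */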
static struct nvme_fc_port_template qla_nvme_fc_transport = {
	.localport_delete = qla_nvme_localport_delete,
	.remoteport_delete = qla_nvme_remoteport_delete,
	.create_queue = qla_nvme_alloc_queue,
	.delete_queue = NULL,
	.ls_req = qla_nvme_ls_req,
	.ls_abort = qla_nvme_ls_abort,
	.fcp_io = qla_nvme_post_cmd,
	.fcp_abort = qla_nvme_fcp_abort,
	.map_queues = qla_nvme_map_queues,
	.max_hw_queues = DEF_NVME_HW_QUEUES,
	.max_sgl_segments = 1024,
	.max_dif_sgl_segments = 64,
	.dma_boundary = 0xFFFFFFFF,
	.local_priv_sz = 8,
	.remote_priv_sz = sizeof(struct qla_nvme_rport),
	.lsrqst_priv_sz = sizeof(struct nvme_private),
	.fcprqst_priv_sz = sizeof(struct nvme_private),
};

void qla_nvme_unregister_remote_port(struct fc_port *fcport)
{
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ql_log(ql_log_warn, fcport->vha, 0x2112,
	    "%s: unregister remoteport on %p %8phN\n",
	    __func__, fcport, fcport->port_name);

	if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
		nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);

	init_completion(&fcport->nvme_del_done);
	ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
	if (ret)
		ql_log(ql_log_info, fcport->vha, 0x2114,
		    "%s: Failed to unregister nvme_remote_port (%d)\n",
		    __func__, ret);
	wait_for_completion(&fcport->nvme_del_done);
}

void qla_nvme_delete(struct scsi_qla_host *vha)
{
	int nv_ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	if (vha->nvme_local_port) {
		init_completion(&vha->nvme_del_done);
		ql_log(ql_log_info, vha, 0x2116,
		    "unregister localport=%p\n",
		    vha->nvme_local_port);
		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
		if (nv_ret)
			ql_log(ql_log_info, vha, 0x2115,
			    "Unregister of localport failed\n");
		else
			wait_for_completion(&vha->nvme_del_done);
	}
}

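/*
 * Register this HBA port as an FC-NVMe local port. Clamps the user supplied
 * ql2xnvme_queues module parameter to the number of available qpairs before
 * publishing it through the transport template.
 */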
int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
	struct nvme_fc_port_template *tmpl;
	struct qla_hw_data *ha;
	struct nvme_fc_port_info pinfo;
	int ret = -EINVAL;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	ha = vha->hw;
	tmpl = &qla_nvme_fc_transport;

	if (ql2xnvme_queues < MIN_NVME_HW_QUEUES) {
		ql_log(ql_log_warn, vha, 0xfffd,
		    "ql2xnvme_queues=%d is lower than minimum queues: %d. Resetting ql2xnvme_queues to: %d\n",
		    ql2xnvme_queues, MIN_NVME_HW_QUEUES, DEF_NVME_HW_QUEUES);
		ql2xnvme_queues = DEF_NVME_HW_QUEUES;
	} else if (ql2xnvme_queues > (ha->max_qpairs - 1)) {
		ql_log(ql_log_warn, vha, 0xfffd,
		    "ql2xnvme_queues=%d is greater than available IRQs: %d. Resetting ql2xnvme_queues to: %d\n",
		    ql2xnvme_queues, (ha->max_qpairs - 1),
		    (ha->max_qpairs - 1));
		ql2xnvme_queues = ha->max_qpairs - 1;
	}

	qla_nvme_fc_transport.max_hw_queues =
	    min((uint8_t)(ql2xnvme_queues),
		(uint8_t)((ha->max_qpairs - 1) ? (ha->max_qpairs - 1) : 1));

	ql_log(ql_log_info, vha, 0xfffb,
	    "Number of NVME queues used for this port: %d\n",
	    qla_nvme_fc_transport.max_hw_queues);

	pinfo.node_name = wwn_to_u64(vha->node_name);
	pinfo.port_name = wwn_to_u64(vha->port_name);
	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	pinfo.port_id = vha->d_id.b24;

	mutex_lock(&ha->vport_lock);
	/*
	 * Check again for nvme_local_port to see if any other thread raced
	 * with this one and finished registration.
	 */
	if (!vha->nvme_local_port) {
		ql_log(ql_log_info, vha, 0xffff,
		    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
		    pinfo.node_name, pinfo.port_name, pinfo.port_id);
		qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

		ret = nvme_fc_register_localport(&pinfo, tmpl,
		    get_device(&ha->pdev->dev),
		    &vha->nvme_local_port);
		mutex_unlock(&ha->vport_lock);
	} else {
		mutex_unlock(&ha->vport_lock);
		return 0;
	}
	if (ret) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "register_localport failed: ret=%x\n", ret);
	} else {
		vha->nvme_local_port->private = vha;
	}

	return ret;
}

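/*
 * Tune the Abort IOCB when the driver waits for ABTS completion: request a
 * driver-specified retry count and a response timeout of 2 * R_A_TOV.
 */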
void qla_nvme_abort_set_option(struct abort_entry_24xx *abt, srb_t *orig_sp)
{
	struct qla_hw_data *ha;

	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
		return;

	ha = orig_sp->fcport->vha->hw;

	WARN_ON_ONCE(abt->options & cpu_to_le16(BIT_0));
	/* Use Driver Specified Retry Count */
	abt->options |= cpu_to_le16(AOF_ABTS_RTY_CNT);
	abt->drv.abts_rty_cnt = cpu_to_le16(2);
	/* Use specified response timeout */
	abt->options |= cpu_to_le16(AOF_RSP_TIMEOUT);
	/* set it to 2 * r_a_tov in secs */
	abt->drv.rsp_timeout = cpu_to_le16(2 * (ha->r_a_tov / 10));
}

void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt, srb_t *orig_sp)
{
	uint16_t comp_status;
	struct scsi_qla_host *vha;

	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
		return;

	vha = orig_sp->fcport->vha;

	comp_status = le16_to_cpu(abt->comp_status);
	switch (comp_status) {
	case CS_RESET:		/* reset event aborted */
	case CS_ABORTED:	/* IOCB was cleaned */
	case CS_TIMEOUT:
	/* N_Port handle is not currently logged in */
	case CS_PORT_UNAVAILABLE:
	/* N_Port handle was logged out while waiting for ABTS to complete */
	case CS_PORT_LOGGED_OUT:
	/* Firmware found that the port name changed */
	case CS_PORT_CONFIG_CHG:
		ql_dbg(ql_dbg_async, vha, 0xf09d,
		       "Abort I/O IOCB completed with error, comp_status=%x\n",
		       comp_status);
		break;

	/* BA_RJT was received for the ABTS */
	case CS_REJECT_RECEIVED:
		ql_dbg(ql_dbg_async, vha, 0xf09e,
		       "BA_RJT was received for the ABTS rjt_vendorUnique = %u",
		       abt->fw.ba_rjt_vendorUnique);
		ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
		       "ba_rjt_reasonCodeExpl = %u, ba_rjt_reasonCode = %u\n",
		       abt->fw.ba_rjt_reasonCodeExpl, abt->fw.ba_rjt_reasonCode);
		break;

	case CS_COMPLETE:
		ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0xf09f,
		       "IOCB request is completed successfully comp_status=%x\n",
		       comp_status);
		break;

	case CS_IOCB_ERROR:
		ql_dbg(ql_dbg_async, vha, 0xf0a0,
		       "IOCB request is failed, comp_status=%x\n", comp_status);
		break;

	default:
		ql_dbg(ql_dbg_async, vha, 0xf0a1,
		       "Invalid Abort IO IOCB Completion Status %x\n",
		       comp_status);
		break;
	}
}

inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp)
{
	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
		return;
	kref_put(&orig_sp->cmd_kref, orig_sp->put_fn);
}