/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include "nvmet.h"

static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];

/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources protected by this semaphore is:
 *
 *  - subsystems list
 *  - per-subsystem allowed hosts list
 *  - allow_any_host subsystem attribute
 *  - nvmet_genctr
 *  - the nvmet_transports array
 *
 * When updating any of those lists/structures the write lock should be
 * obtained, while when reading (populating the discovery log page or checking
 * a host-subsystem link) the read lock is obtained to allow concurrent reads.
 */
DECLARE_RWSEM(nvmet_config_sem);

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn);
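
/*
 * Helpers to copy between a request's scatter-gather list and a linear
 * buffer.  A short copy is reported as an SGL data error with DNR set so
 * the host does not retry the command.
 */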
u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len)
{
	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}

u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}

static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}
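
/*
 * Asynchronous events are queued on ctrl->async_events and paired with
 * outstanding AER commands stashed in ctrl->async_event_cmds.  The free path
 * below fails any AER commands still pending when a controller goes away;
 * the work function completes one pending AER per queued event.
 */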
static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		if (!ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
	}
}

static void nvmet_async_event_work(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, async_event_work);
	struct nvmet_async_event *aen;
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		aen = list_first_entry_or_null(&ctrl->async_events,
				struct nvmet_async_event, entry);
		if (!aen || !ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		nvmet_set_result(req, nvmet_async_event_result(aen));

		list_del(&aen->entry);
		kfree(aen);

		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, 0);
	}
}

static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
{
	struct nvmet_async_event *aen;

	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
	if (!aen)
		return;

	aen->event_type = event_type;
	aen->event_info = event_info;
	aen->log_page = log_page;

	mutex_lock(&ctrl->lock);
	list_add_tail(&aen->entry, &ctrl->async_events);
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}
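
/*
 * Transport drivers (e.g. the loop or RDMA drivers) register their ops here,
 * keyed by the fabrics transport type.  Registration is serialized by
 * nvmet_config_sem.
 */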
int nvmet_register_transport(struct nvmet_fabrics_ops *ops)
{
	int ret = 0;

	down_write(&nvmet_config_sem);
	if (nvmet_transports[ops->type])
		ret = -EINVAL;
	else
		nvmet_transports[ops->type] = ops;
	up_write(&nvmet_config_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);

void nvmet_unregister_transport(struct nvmet_fabrics_ops *ops)
{
	down_write(&nvmet_config_sem);
	nvmet_transports[ops->type] = NULL;
	up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
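
/*
 * Enabling a port requires the matching transport to be loaded.  If it is not
 * registered yet, temporarily drop nvmet_config_sem (the caller holds it for
 * writing) and try to load "nvmet-transport-<trtype>" before looking the ops
 * up again.
 */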
int nvmet_enable_port(struct nvmet_port *port)
{
	struct nvmet_fabrics_ops *ops;
	int ret;

	lockdep_assert_held(&nvmet_config_sem);

	ops = nvmet_transports[port->disc_addr.trtype];
	if (!ops) {
		up_write(&nvmet_config_sem);
		request_module("nvmet-transport-%d", port->disc_addr.trtype);
		down_write(&nvmet_config_sem);
		ops = nvmet_transports[port->disc_addr.trtype];
		if (!ops) {
			pr_err("transport type %d not supported\n",
				port->disc_addr.trtype);
			return -EINVAL;
		}
	}

	if (!try_module_get(ops->owner))
		return -EINVAL;

	ret = ops->add_port(port);
	if (ret) {
		module_put(ops->owner);
		return ret;
	}

	port->enabled = true;
	return 0;
}

void nvmet_disable_port(struct nvmet_port *port)
{
	struct nvmet_fabrics_ops *ops;

	lockdep_assert_held(&nvmet_config_sem);

	port->enabled = false;

	ops = nvmet_transports[port->disc_addr.trtype];
	ops->remove_port(port);
	module_put(ops->owner);
}
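
/*
 * Keep-alive handling: if the keep-alive timer is not rearmed within
 * ctrl->kato seconds, the delayed work below fires and deletes the
 * controller through the transport's delete_ctrl callback.
 */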
static void nvmet_keep_alive_timer(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvmet_ctrl, ka_work);

	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
		ctrl->cntlid, ctrl->kato);

	ctrl->ops->delete_ctrl(ctrl);
}

static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

	cancel_delayed_work_sync(&ctrl->ka_work);
}
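
/*
 * Namespace lookup walks the subsystem's namespace list under RCU; callers
 * that need the namespace beyond the RCU read section take a percpu
 * reference via nvmet_find_namespace().
 */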
static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
		__le32 nsid)
{
	struct nvmet_ns *ns;

	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid == le32_to_cpu(nsid))
			return ns;
	}

	return NULL;
}

struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	struct nvmet_ns *ns;

	rcu_read_lock();
	ns = __nvmet_find_namespace(ctrl, nsid);
	if (ns)
		percpu_ref_get(&ns->ref);
	rcu_read_unlock();

	return ns;
}

static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

	complete(&ns->disable_done);
}

void nvmet_put_namespace(struct nvmet_ns *ns)
{
	percpu_ref_put(&ns->ref);
}
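
/*
 * nvmet_ns_enable() opens the backing block device, initializes the
 * namespace percpu ref and inserts the namespace into the subsystem's
 * nsid-sorted list, then signals an AEN to every attached controller.
 */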
int nvmet_ns_enable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (ns->enabled)
		goto out_unlock;

	ns->bdev = blkdev_get_by_path(ns->device_path, FMODE_READ | FMODE_WRITE,
			NULL);
	if (IS_ERR(ns->bdev)) {
		pr_err("nvmet: failed to open block device %s: (%ld)\n",
			ns->device_path, PTR_ERR(ns->bdev));
		ret = PTR_ERR(ns->bdev);
		ns->bdev = NULL;
		goto out_unlock;
	}

	ns->size = i_size_read(ns->bdev->bd_inode);
	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));

	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
				0, GFP_KERNEL);
	if (ret)
		goto out_blkdev_put;

	if (ns->nsid > subsys->max_nsid)
		subsys->max_nsid = ns->nsid;

	/*
	 * The namespaces list needs to be sorted to simplify the implementation
	 * of the Identify Namespace List subcommand.
	 */
	if (list_empty(&subsys->namespaces)) {
		list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
	} else {
		struct nvmet_ns *old;

		list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
			BUG_ON(ns->nsid == old->nsid);
			if (ns->nsid < old->nsid)
				break;
		}

		list_add_tail_rcu(&ns->dev_link, &old->dev_link);
	}

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);

	ns->enabled = true;
	ret = 0;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
out_blkdev_put:
	blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
	ns->bdev = NULL;
	goto out_unlock;
}

void nvmet_ns_disable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	if (!ns->enabled)
		goto out_unlock;

	ns->enabled = false;
	list_del_rcu(&ns->dev_link);
	mutex_unlock(&subsys->lock);

	/*
	 * Now that we removed the namespace from the lookup list, we
	 * can kill the percpu ref and wait for any remaining references
	 * to be dropped, as well as an RCU grace period for anyone only
	 * using the namespace under rcu_read_lock().  Note that we can't
	 * use call_rcu here as we need to ensure the namespaces have
	 * been fully destroyed before unloading the module.
	 */
	percpu_ref_kill(&ns->ref);
	synchronize_rcu();
	wait_for_completion(&ns->disable_done);
	percpu_ref_exit(&ns->ref);

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);

	if (ns->bdev)
		blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
out_unlock:
	mutex_unlock(&subsys->lock);
}

void nvmet_ns_free(struct nvmet_ns *ns)
{
	nvmet_ns_disable(ns);

	kfree(ns->device_path);
	kfree(ns);
}

struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ns *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;

	INIT_LIST_HEAD(&ns->dev_link);
	init_completion(&ns->disable_done);

	ns->nsid = nsid;
	ns->subsys = subsys;

	return ns;
}
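
/*
 * Completion path: fill in the CQE fields that are common to all commands,
 * drop the namespace reference if one was taken during parsing, and hand the
 * response to the transport.
 */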
static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	if (status)
		nvmet_set_status(req, status);

	/* XXX: need to fill in something useful for sq_head */
	req->rsp->sq_head = 0;
	if (likely(req->sq)) /* may happen during early failure */
		req->rsp->sq_id = cpu_to_le16(req->sq->qid);
	req->rsp->command_id = req->cmd->common.command_id;

	if (req->ns)
		nvmet_put_namespace(req->ns);
	req->ops->queue_response(req);
}

void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	__nvmet_req_complete(req, status);
	percpu_ref_put(&req->sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);
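
/*
 * Queue setup simply records the queue in the controller's cq/sq arrays.
 * Teardown of a submission queue waits for all in-flight requests to drop
 * their percpu references before the queue memory may be reused.
 */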
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
		u16 qid, u16 size)
{
	cq->qid = qid;
	cq->size = size;

	ctrl->cqs[qid] = cq;
}

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		u16 qid, u16 size)
{
	sq->qid = qid;
	sq->size = size;

	ctrl->sqs[qid] = sq;
}

static void nvmet_confirm_sq(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->confirm_done);
}

void nvmet_sq_destroy(struct nvmet_sq *sq)
{
	/*
	 * If this is the admin queue, complete all AERs so that our
	 * queue doesn't have outstanding requests on it.
	 */
	if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
		nvmet_async_events_free(sq->ctrl);
	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
	wait_for_completion(&sq->confirm_done);
	wait_for_completion(&sq->free_done);
	percpu_ref_exit(&sq->ref);

	if (sq->ctrl) {
		nvmet_ctrl_put(sq->ctrl);
		sq->ctrl = NULL; /* allows reusing the queue later */
	}
}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);

static void nvmet_sq_free(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->free_done);
}

int nvmet_sq_init(struct nvmet_sq *sq)
{
	int ret;

	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
	if (ret) {
		pr_err("percpu_ref init failed!\n");
		return ret;
	}
	init_completion(&sq->free_done);
	init_completion(&sq->confirm_done);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);
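
/*
 * nvmet_req_init() validates the common command fields and dispatches to the
 * connect, I/O, fabrics, discovery or admin parser depending on queue and
 * controller state.  On failure the request is completed here and false is
 * returned so the transport does not execute it.
 */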
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops)
{
	u8 flags = req->cmd->common.flags;
	u16 status;

	req->cq = cq;
	req->sq = sq;
	req->ops = ops;
	req->sg = NULL;
	req->sg_cnt = 0;
	req->rsp->status = 0;

	/* no support for fused commands yet */
	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	/*
	 * For fabrics, the PSDT field shall describe a metadata pointer (MPTR)
	 * that contains the address of a single contiguous physical buffer
	 * that is byte aligned.
	 */
	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (unlikely(!req->sq->ctrl))
		/* will return an error for any non-connect command: */
		status = nvmet_parse_connect_cmd(req);
	else if (likely(req->sq->qid != 0))
		status = nvmet_parse_io_cmd(req);
	else if (req->cmd->common.opcode == nvme_fabrics_command)
		status = nvmet_parse_fabrics_cmd(req);
	else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
		status = nvmet_parse_discovery_cmd(req);
	else
		status = nvmet_parse_admin_cmd(req);

	if (status)
		goto fail;

	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	return true;

fail:
	__nvmet_req_complete(req, status);
	return false;
}
EXPORT_SYMBOL_GPL(nvmet_req_init);
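
/*
 * Field accessors for the Controller Configuration (CC) register, as laid
 * out in the NVMe specification:
 *
 *	EN	bit  0		enable
 *	CSS	bits 06:04	I/O command set selected
 *	MPS	bits 10:07	memory page size
 *	AMS	bits 13:11	arbitration mechanism selected
 *	SHN	bits 15:14	shutdown notification
 *	IOSQES	bits 19:16	I/O submission queue entry size
 *	IOCQES	bits 23:20	I/O completion queue entry size
 */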
static inline bool nvmet_cc_en(u32 cc)
{
	return cc & 0x1;
}

static inline u8 nvmet_cc_css(u32 cc)
{
	return (cc >> 4) & 0x7;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
	return (cc >> 7) & 0xf;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
	return (cc >> 11) & 0x7;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
	return (cc >> 14) & 0x3;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
	return (cc >> 16) & 0xf;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
	return (cc >> 20) & 0xf;
}

static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/*
	 * Only I/O controllers should verify iosqes,iocqes.
	 * Strictly speaking, the spec says a discovery controller
	 * should verify iosqes,iocqes are zeroed, however that
	 * would break backwards compatibility, so don't enforce it.
	 */
	if (ctrl->subsys->type != NVME_NQN_DISC &&
	    (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
	     nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	if (nvmet_cc_mps(ctrl->cc) != 0 ||
	    nvmet_cc_ams(ctrl->cc) != 0 ||
	    nvmet_cc_css(ctrl->cc) != 0) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	ctrl->csts = NVME_CSTS_RDY;

	/*
	 * Controllers that are not yet enabled should not really enforce the
	 * keep alive timeout, but we still want to track a timeout and cleanup
	 * in case a host died before it enabled the controller.  Hence, simply
	 * reset the keep alive timer when the controller is enabled.
	 */
	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/* XXX: tear down queues? */
	ctrl->csts &= ~NVME_CSTS_RDY;
	ctrl->cc = 0;
}

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
{
	u32 old;

	mutex_lock(&ctrl->lock);
	old = ctrl->cc;
	ctrl->cc = new;

	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
		nvmet_start_ctrl(ctrl);
	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
		nvmet_clear_ctrl(ctrl);
	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
		nvmet_clear_ctrl(ctrl);
		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
	}
	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
	mutex_unlock(&ctrl->lock);
}

static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
	/* command sets supported: NVMe command set: */
	ctrl->cap = (1ULL << 37);
	/* CC.EN timeout in 500msec units: */
	ctrl->cap |= (15ULL << 24);
	/* maximum queue entries supported: */
	ctrl->cap |= NVMET_QUEUE_SIZE - 1;
}
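
/*
 * Look up an existing controller by cntlid for a Connect command on an I/O
 * queue.  The host NQN must match the one that created the controller, and a
 * reference is taken on success.
 */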
u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	u16 status = 0;

	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result = IPO_IATTR_CONNECT_DATA(subsysnqn);
		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	}

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->cntlid == cntlid) {
			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
				pr_warn("hostnqn mismatch.\n");
				continue;
			}
			if (!kref_get_unless_zero(&ctrl->ref))
				continue;

			*ret = ctrl;
			goto out;
		}
	}

	pr_warn("could not find controller %d for subsys %s / host %s\n",
		cntlid, subsysnqn, hostnqn);
	req->rsp->result = IPO_IATTR_CONNECT_DATA(cntlid);
	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;

out:
	mutex_unlock(&subsys->lock);
	nvmet_subsys_put(subsys);
	return status;
}

static bool __nvmet_host_allowed(struct nvmet_subsys *subsys,
		const char *hostnqn)
{
	struct nvmet_host_link *p;

	if (subsys->allow_any_host)
		return true;

	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), hostnqn))
			return true;
	}

	return false;
}

static bool nvmet_host_discovery_allowed(struct nvmet_req *req,
		const char *hostnqn)
{
	struct nvmet_subsys_link *s;

	list_for_each_entry(s, &req->port->subsystems, entry) {
		if (__nvmet_host_allowed(s->subsys, hostnqn))
			return true;
	}

	return false;
}

bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
		const char *hostnqn)
{
	lockdep_assert_held(&nvmet_config_sem);

	if (subsys->type == NVME_NQN_DISC)
		return nvmet_host_discovery_allowed(req, hostnqn);
	else
		return __nvmet_host_allowed(subsys, hostnqn);
}

static void nvmet_fatal_error_handler(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
			container_of(work, struct nvmet_ctrl, fatal_err_work);

	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
	ctrl->ops->delete_ctrl(ctrl);
}
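
/*
 * Allocate a new controller for a Connect command on an admin queue: check
 * that the host is allowed to access the subsystem, set up the AEN and fatal
 * error work, allocate the queue arrays and a cntlid, and start the
 * keep-alive timer.
 */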
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	int ret;
	u16 status;

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result = IPO_IATTR_CONNECT_DATA(subsysnqn);
		goto out;
	}

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	down_read(&nvmet_config_sem);
	if (!nvmet_host_allowed(req, subsys, hostnqn)) {
		pr_info("connect by host %s for subsystem %s not allowed\n",
			hostnqn, subsysnqn);
		req->rsp->result = IPO_IATTR_CONNECT_DATA(hostnqn);
		up_read(&nvmet_config_sem);
		goto out_put_subsystem;
	}
	up_read(&nvmet_config_sem);

	status = NVME_SC_INTERNAL;
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		goto out_put_subsystem;
	mutex_init(&ctrl->lock);

	nvmet_init_cap(ctrl);

	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
	INIT_LIST_HEAD(&ctrl->async_events);
	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);

	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);

	kref_init(&ctrl->ref);
	ctrl->subsys = subsys;

	ctrl->cqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_cq *),
			GFP_KERNEL);
	if (!ctrl->cqs)
		goto out_free_ctrl;

	ctrl->sqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_sq *),
			GFP_KERNEL);
	if (!ctrl->sqs)
		goto out_free_cqs;

	ret = ida_simple_get(&subsys->cntlid_ida,
			NVME_CNTLID_MIN, NVME_CNTLID_MAX,
			GFP_KERNEL);
	if (ret < 0) {
		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
		goto out_free_sqs;
	}
	ctrl->cntlid = ret;

	ctrl->ops = req->ops;
	if (ctrl->subsys->type == NVME_NQN_DISC) {
		/* Don't accept keep-alive timeout for discovery controllers */
		if (kato) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			goto out_free_sqs;
		}

		/*
		 * Discovery controllers use some arbitrary high value in order
		 * to clean up stale discovery sessions.
		 *
		 * From the latest base diff RC:
		 * "The Keep Alive command is not supported by
		 * Discovery controllers. A transport may specify a
		 * fixed Discovery controller activity timeout value
		 * (e.g., 2 minutes). If no commands are received
		 * by a Discovery controller within that time
		 * period, the controller may perform the
		 * actions for Keep Alive Timer expiration".
		 */
		ctrl->kato = NVMET_DISC_KATO;
	} else {
		/* keep-alive timeout in seconds */
		ctrl->kato = DIV_ROUND_UP(kato, 1000);
	}
	nvmet_start_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	mutex_unlock(&subsys->lock);

	*ctrlp = ctrl;
	return 0;

out_free_sqs:
	kfree(ctrl->sqs);
out_free_cqs:
	kfree(ctrl->cqs);
out_free_ctrl:
	kfree(ctrl);
out_put_subsystem:
	nvmet_subsys_put(subsys);
out:
	return status;
}

static void nvmet_ctrl_free(struct kref *ref)
{
	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
	struct nvmet_subsys *subsys = ctrl->subsys;

	nvmet_stop_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_del(&ctrl->subsys_entry);
	mutex_unlock(&subsys->lock);

	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fatal_err_work);

	ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid);
	nvmet_subsys_put(subsys);

	kfree(ctrl->sqs);
	kfree(ctrl->cqs);
	kfree(ctrl);
}

void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvmet_ctrl_free);
}

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
	mutex_lock(&ctrl->lock);
	if (!(ctrl->csts & NVME_CSTS_CFS)) {
		ctrl->csts |= NVME_CSTS_CFS;
		schedule_work(&ctrl->fatal_err_work);
	}
	mutex_unlock(&ctrl->lock);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
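
/*
 * Resolve a subsystem NQN for a given port.  The well-known discovery NQN
 * maps to the global discovery subsystem; anything else must be explicitly
 * linked to the port through configfs.
 */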
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn)
{
	struct nvmet_subsys_link *p;

	if (!port)
		return NULL;

	if (!strncmp(NVME_DISC_SUBSYS_NAME, subsysnqn,
			NVMF_NQN_SIZE)) {
		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
			return NULL;
		return nvmet_disc_subsys;
	}

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
				NVMF_NQN_SIZE)) {
			if (!kref_get_unless_zero(&p->subsys->ref))
				break;
			up_read(&nvmet_config_sem);
			return p->subsys;
		}
	}
	up_read(&nvmet_config_sem);
	return NULL;
}

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type)
{
	struct nvmet_subsys *subsys;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return NULL;

	subsys->ver = NVME_VS(1, 2, 1); /* NVMe 1.2.1 */
	/* generate a random serial number as our controllers are ephemeral: */
	get_random_bytes(&subsys->serial, sizeof(subsys->serial));

	switch (type) {
	case NVME_NQN_NVME:
		subsys->max_qid = NVMET_NR_QUEUES;
		break;
	case NVME_NQN_DISC:
		subsys->max_qid = 0;
		break;
	default:
		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
		kfree(subsys);
		return NULL;
	}
	subsys->type = type;
	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
			GFP_KERNEL);
	if (!subsys->subsysnqn) {
		kfree(subsys);
		return NULL;
	}

	kref_init(&subsys->ref);

	mutex_init(&subsys->lock);
	INIT_LIST_HEAD(&subsys->namespaces);
	INIT_LIST_HEAD(&subsys->ctrls);

	ida_init(&subsys->cntlid_ida);

	INIT_LIST_HEAD(&subsys->hosts);

	return subsys;
}

static void nvmet_subsys_free(struct kref *ref)
{
	struct nvmet_subsys *subsys =
		container_of(ref, struct nvmet_subsys, ref);

	WARN_ON_ONCE(!list_empty(&subsys->namespaces));

	ida_destroy(&subsys->cntlid_ida);
	kfree(subsys->subsysnqn);
	kfree(subsys);
}

void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
	kref_put(&subsys->ref, nvmet_subsys_free);
}

static int __init nvmet_init(void)
{
	int error;

	error = nvmet_init_discovery();
	if (error)
		goto out;

	error = nvmet_init_configfs();
	if (error)
		goto out_exit_discovery;
	return 0;

out_exit_discovery:
	nvmet_exit_discovery();
out:
	return error;
}

static void __exit nvmet_exit(void)
{
	nvmet_exit_configfs();
	nvmet_exit_discovery();

	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
}

module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_LICENSE("GPL v2");