/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>

#include "nvmet.h"

static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);

/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources protected by this semaphore is:
 *
 *  - subsystems list
 *  - per-subsystem allowed hosts list
 *  - allow_any_host subsystem attribute
 *  - nvmet_genctr
 *  - the nvmet_transports array
 *
 * When updating any of those lists/structures the write lock is taken,
 * while readers (populating the discovery log page or checking a
 * host-subsystem link) take the read lock to allow concurrent reads.
 */
DECLARE_RWSEM(nvmet_config_sem);

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn);

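/*
 * Helpers to copy data between a driver buffer and the request's
 * scatter-gather list.  A short copy means the host-supplied SGL does not
 * cover the requested range, which is reported back as an SGL data error.
 */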
u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len)
{
	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}

u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}

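/*
 * Pack an Asynchronous Event into completion queue entry dword 0 as laid
 * out by the NVMe specification: event type in bits 02:00, event
 * information in bits 15:08, and the associated log page identifier in
 * bits 23:16.
 */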
static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}

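/*
 * Fail all Asynchronous Event Request commands the host still has
 * outstanding.  Called when the admin queue is torn down so that no AER
 * completions remain pending on it.
 */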
static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		if (!ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
	}
}

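/*
 * Deliver queued events to the host: each iteration pairs the oldest
 * pending event with an outstanding AER command and completes it.  The
 * loop stops once either the event list or the pool of AER commands runs
 * dry.
 */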
static void nvmet_async_event_work(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, async_event_work);
	struct nvmet_async_event *aen;
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		aen = list_first_entry_or_null(&ctrl->async_events,
				struct nvmet_async_event, entry);
		if (!aen || !ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		nvmet_set_result(req, nvmet_async_event_result(aen));

		list_del(&aen->entry);
		kfree(aen);

		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, 0);
	}
}

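/*
 * Queue an event for delivery and kick the worker that matches events
 * against outstanding AER commands.  On allocation failure the event is
 * dropped, so a host may miss a notice under memory pressure.
 */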
static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
{
	struct nvmet_async_event *aen;

	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
	if (!aen)
		return;

	aen->event_type = event_type;
	aen->event_info = event_info;
	aen->log_page = log_page;

	mutex_lock(&ctrl->lock);
	list_add_tail(&aen->entry, &ctrl->async_events);
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

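/*
 * Transport registration.  A fabrics driver registers a single ops table
 * per transport type from its module init, roughly along these lines
 * (illustrative sketch only; real ops tables set many more methods):
 *
 *	static struct nvmet_fabrics_ops nvmet_loop_ops = {
 *		.owner		= THIS_MODULE,
 *		.type		= NVMF_TRTYPE_LOOP,
 *		...
 *	};
 *
 *	ret = nvmet_register_transport(&nvmet_loop_ops);
 *
 * Registration fails with -EINVAL if a transport of that type is already
 * registered.
 */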
int nvmet_register_transport(struct nvmet_fabrics_ops *ops)
{
	int ret = 0;

	down_write(&nvmet_config_sem);
	if (nvmet_transports[ops->type])
		ret = -EINVAL;
	else
		nvmet_transports[ops->type] = ops;
	up_write(&nvmet_config_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);

void nvmet_unregister_transport(struct nvmet_fabrics_ops *ops)
{
	down_write(&nvmet_config_sem);
	nvmet_transports[ops->type] = NULL;
	up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);

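/*
 * Bring up a port on its transport.  If no transport of the requested type
 * is registered yet, drop nvmet_config_sem around request_module() so the
 * transport module can load and register itself, then retry the lookup.
 */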
int nvmet_enable_port(struct nvmet_port *port)
{
	struct nvmet_fabrics_ops *ops;
	int ret;

	lockdep_assert_held(&nvmet_config_sem);

	ops = nvmet_transports[port->disc_addr.trtype];
	if (!ops) {
		up_write(&nvmet_config_sem);
		request_module("nvmet-transport-%d", port->disc_addr.trtype);
		down_write(&nvmet_config_sem);
		ops = nvmet_transports[port->disc_addr.trtype];
		if (!ops) {
			pr_err("transport type %d not supported\n",
				port->disc_addr.trtype);
			return -EINVAL;
		}
	}

	if (!try_module_get(ops->owner))
		return -EINVAL;

	ret = ops->add_port(port);
	if (ret) {
		module_put(ops->owner);
		return ret;
	}

	port->enabled = true;
	return 0;
}

void nvmet_disable_port(struct nvmet_port *port)
{
	struct nvmet_fabrics_ops *ops;

	lockdep_assert_held(&nvmet_config_sem);

	port->enabled = false;

	ops = nvmet_transports[port->disc_addr.trtype];
	ops->remove_port(port);
	module_put(ops->owner);
}

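/*
 * Keep-alive expiry: the host failed to send a Keep Alive command within
 * KATO seconds, so assume it is gone and tear the controller down through
 * the fatal error path.
 */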
static void nvmet_keep_alive_timer(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvmet_ctrl, ka_work);

	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
		ctrl->cntlid, ctrl->kato);

	nvmet_ctrl_fatal_error(ctrl);
}

static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

	cancel_delayed_work_sync(&ctrl->ka_work);
}

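/*
 * Namespace lookup walks the subsystem's RCU-protected, nsid-sorted list.
 * Callers must hold rcu_read_lock(); nvmet_find_namespace() below
 * additionally takes a percpu reference on a hit.
 */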
static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
		__le32 nsid)
{
	struct nvmet_ns *ns;

	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid == le32_to_cpu(nsid))
			return ns;
	}

	return NULL;
}

struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	struct nvmet_ns *ns;

	rcu_read_lock();
	ns = __nvmet_find_namespace(ctrl, nsid);
	if (ns)
		percpu_ref_get(&ns->ref);
	rcu_read_unlock();

	return ns;
}

static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

	complete(&ns->disable_done);
}

void nvmet_put_namespace(struct nvmet_ns *ns)
{
	percpu_ref_put(&ns->ref);
}

int nvmet_ns_enable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (ns->enabled)
		goto out_unlock;

	ns->bdev = blkdev_get_by_path(ns->device_path, FMODE_READ | FMODE_WRITE,
			NULL);
	if (IS_ERR(ns->bdev)) {
		pr_err("failed to open block device %s: (%ld)\n",
		       ns->device_path, PTR_ERR(ns->bdev));
		ret = PTR_ERR(ns->bdev);
		ns->bdev = NULL;
		goto out_unlock;
	}

	ns->size = i_size_read(ns->bdev->bd_inode);
	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));

	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
				0, GFP_KERNEL);
	if (ret)
		goto out_blkdev_put;

	if (ns->nsid > subsys->max_nsid)
		subsys->max_nsid = ns->nsid;

	/*
	 * The namespaces list needs to be sorted to simplify the
	 * implementation of the Identify Namespace List subcommand.
	 */
	if (list_empty(&subsys->namespaces)) {
		list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
	} else {
		struct nvmet_ns *old;

		list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
			BUG_ON(ns->nsid == old->nsid);
			if (ns->nsid < old->nsid)
				break;
		}

		list_add_tail_rcu(&ns->dev_link, &old->dev_link);
	}

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);

	ns->enabled = true;
	ret = 0;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
out_blkdev_put:
	blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
	ns->bdev = NULL;
	goto out_unlock;
}

void nvmet_ns_disable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	if (!ns->enabled)
		goto out_unlock;

	ns->enabled = false;
	list_del_rcu(&ns->dev_link);
	mutex_unlock(&subsys->lock);

	/*
	 * Now that we removed the namespace from the lookup list, we
	 * can kill the percpu ref and wait for any remaining references
	 * to be dropped, as well as an RCU grace period for anyone only
	 * using the namespace under rcu_read_lock().  Note that we can't
	 * use call_rcu here as we need to ensure the namespaces have
	 * been fully destroyed before unloading the module.
	 */
	percpu_ref_kill(&ns->ref);
	synchronize_rcu();
	wait_for_completion(&ns->disable_done);
	percpu_ref_exit(&ns->ref);

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);

	if (ns->bdev)
		blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
out_unlock:
	mutex_unlock(&subsys->lock);
}

void nvmet_ns_free(struct nvmet_ns *ns)
{
	nvmet_ns_disable(ns);

	kfree(ns->device_path);
	kfree(ns);
}

struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ns *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;

	INIT_LIST_HEAD(&ns->dev_link);
	init_completion(&ns->disable_done);

	ns->nsid = nsid;
	ns->subsys = subsys;
	uuid_gen(&ns->uuid);

	return ns;
}

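/*
 * Completion path.  The SQ head pointer is advanced with a cmpxchg() loop
 * because completions may run concurrently from several contexts; a zero
 * queue size (queue not yet set up) skips the update and leaves the head
 * pointer untouched.
 */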
static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	u32 old_sqhd, new_sqhd;
	u16 sqhd;

	if (status)
		nvmet_set_status(req, status);

	if (req->sq->size) {
		do {
			old_sqhd = req->sq->sqhd;
			new_sqhd = (old_sqhd + 1) % req->sq->size;
		} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
					old_sqhd);
	}
	sqhd = req->sq->sqhd & 0x0000FFFF;
	req->rsp->sq_head = cpu_to_le16(sqhd);
	req->rsp->sq_id = cpu_to_le16(req->sq->qid);
	req->rsp->command_id = req->cmd->common.command_id;

	if (req->ns)
		nvmet_put_namespace(req->ns);
	req->ops->queue_response(req);
}

void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	struct nvmet_sq *sq = req->sq;

	__nvmet_req_complete(req, status);
	percpu_ref_put(&sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
		u16 qid, u16 size)
{
	cq->qid = qid;
	cq->size = size;

	ctrl->cqs[qid] = cq;
}

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		u16 qid, u16 size)
{
	sq->sqhd = 0;
	sq->qid = qid;
	sq->size = size;

	ctrl->sqs[qid] = sq;
}

static void nvmet_confirm_sq(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->confirm_done);
}

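/*
 * SQ teardown waits twice on the percpu ref: first for the kill to be
 * confirmed (no new requests can take a reference), then for the last
 * in-flight request to drop its reference.
 */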
void nvmet_sq_destroy(struct nvmet_sq *sq)
{
	/*
	 * If this is the admin queue, complete all AERs so that our
	 * queue doesn't have outstanding requests on it.
	 */
	if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
		nvmet_async_events_free(sq->ctrl);
	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
	wait_for_completion(&sq->confirm_done);
	wait_for_completion(&sq->free_done);
	percpu_ref_exit(&sq->ref);

	if (sq->ctrl) {
		nvmet_ctrl_put(sq->ctrl);
		sq->ctrl = NULL; /* allows reusing the queue later */
	}
}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);

static void nvmet_sq_free(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->free_done);
}

int nvmet_sq_init(struct nvmet_sq *sq)
{
	int ret;

	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
	if (ret) {
		pr_err("percpu_ref init failed!\n");
		return ret;
	}
	init_completion(&sq->free_done);
	init_completion(&sq->confirm_done);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops)
{
	u8 flags = req->cmd->common.flags;
	u16 status;

	req->cq = cq;
	req->sq = sq;
	req->ops = ops;
	req->sg = NULL;
	req->sg_cnt = 0;
	req->rsp->status = 0;

	/* no support for fused commands yet */
	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	/*
	 * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
	 * contains an address of a single contiguous physical buffer that is
	 * byte aligned.
	 */
	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (unlikely(!req->sq->ctrl))
		/* will return an error for any Non-connect command: */
		status = nvmet_parse_connect_cmd(req);
	else if (likely(req->sq->qid != 0))
		status = nvmet_parse_io_cmd(req);
	else if (req->cmd->common.opcode == nvme_fabrics_command)
		status = nvmet_parse_fabrics_cmd(req);
	else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
		status = nvmet_parse_discovery_cmd(req);
	else
		status = nvmet_parse_admin_cmd(req);

	if (status)
		goto fail;

	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	return true;

fail:
	__nvmet_req_complete(req, status);
	return false;
}
EXPORT_SYMBOL_GPL(nvmet_req_init);

void nvmet_req_uninit(struct nvmet_req *req)
{
	percpu_ref_put(&req->sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);

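/* Field accessors for the Controller Configuration (CC) register. */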
static inline bool nvmet_cc_en(u32 cc)
{
	return (cc >> NVME_CC_EN_SHIFT) & 0x1;
}

static inline u8 nvmet_cc_css(u32 cc)
{
	return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
	return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
	return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
	return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
	return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
	return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
}

static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/*
	 * Only I/O controllers should verify iosqes,iocqes.
	 * Strictly speaking, the spec says a discovery controller
	 * should verify iosqes,iocqes are zeroed, however that
	 * would break backwards compatibility, so don't enforce it.
	 */
	if (ctrl->subsys->type != NVME_NQN_DISC &&
	    (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
	     nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	if (nvmet_cc_mps(ctrl->cc) != 0 ||
	    nvmet_cc_ams(ctrl->cc) != 0 ||
	    nvmet_cc_css(ctrl->cc) != 0) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	ctrl->csts = NVME_CSTS_RDY;

	/*
	 * Controllers that are not yet enabled should not really enforce the
	 * keep alive timeout, but we still want to track a timeout and cleanup
	 * in case a host died before it enabled the controller.  Hence, simply
	 * reset the keep alive timer when the controller is enabled.
	 */
	if (ctrl->kato)
		mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/* XXX: tear down queues? */
	ctrl->csts &= ~NVME_CSTS_RDY;
	ctrl->cc = 0;
}

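/*
 * React to a host write of the CC register: handle enable, disable and
 * shutdown transitions and reflect the result in CSTS.
 */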
void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
{
	u32 old;

	mutex_lock(&ctrl->lock);
	old = ctrl->cc;
	ctrl->cc = new;

	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
		nvmet_start_ctrl(ctrl);
	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
		nvmet_clear_ctrl(ctrl);
	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
		nvmet_clear_ctrl(ctrl);
		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
	}
	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
	mutex_unlock(&ctrl->lock);
}

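/*
 * Advertised controller capabilities.  Note that CAP.TO is counted in
 * 500ms units and CAP.MQES is zero-based, hence the -1 on the queue size.
 */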
static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
	/* command sets supported: NVMe command set: */
	ctrl->cap = (1ULL << 37);
	/* CC.EN timeout in 500msec units: */
	ctrl->cap |= (15ULL << 24);
	/* maximum queue entries supported: */
	ctrl->cap |= NVMET_QUEUE_SIZE - 1;
}

u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	u16 status = 0;

	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	}

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->cntlid == cntlid) {
			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
				pr_warn("hostnqn mismatch.\n");
				continue;
			}
			if (!kref_get_unless_zero(&ctrl->ref))
				continue;

			*ret = ctrl;
			goto out;
		}
	}

	pr_warn("could not find controller %d for subsys %s / host %s\n",
		cntlid, subsysnqn, hostnqn);
	req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;

out:
	mutex_unlock(&subsys->lock);
	nvmet_subsys_put(subsys);
	return status;
}

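/*
 * Reject commands issued before the controller has been enabled and become
 * ready; the host must complete controller initialization first.
 */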
u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
{
	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
		pr_err("got io cmd %d while CC.EN == 0 on qid = %d\n",
		       cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got io cmd %d while CSTS.RDY == 0 on qid = %d\n",
		       cmd->common.opcode, req->sq->qid);
		req->ns = NULL;
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}
	return 0;
}

static bool __nvmet_host_allowed(struct nvmet_subsys *subsys,
		const char *hostnqn)
{
	struct nvmet_host_link *p;

	if (subsys->allow_any_host)
		return true;

	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), hostnqn))
			return true;
	}

	return false;
}

static bool nvmet_host_discovery_allowed(struct nvmet_req *req,
		const char *hostnqn)
{
	struct nvmet_subsys_link *s;

	list_for_each_entry(s, &req->port->subsystems, entry) {
		if (__nvmet_host_allowed(s->subsys, hostnqn))
			return true;
	}

	return false;
}

bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
		const char *hostnqn)
{
	lockdep_assert_held(&nvmet_config_sem);

	if (subsys->type == NVME_NQN_DISC)
		return nvmet_host_discovery_allowed(req, hostnqn);
	else
		return __nvmet_host_allowed(subsys, hostnqn);
}

static void nvmet_fatal_error_handler(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
			container_of(work, struct nvmet_ctrl, fatal_err_work);

	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
	ctrl->ops->delete_ctrl(ctrl);
}

u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	int ret;
	u16 status;

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		goto out;
	}

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	down_read(&nvmet_config_sem);
	if (!nvmet_host_allowed(req, subsys, hostnqn)) {
		pr_info("connect by host %s for subsystem %s not allowed\n",
			hostnqn, subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
		up_read(&nvmet_config_sem);
		status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
		goto out_put_subsystem;
	}
	up_read(&nvmet_config_sem);

	status = NVME_SC_INTERNAL;
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		goto out_put_subsystem;
	mutex_init(&ctrl->lock);

	nvmet_init_cap(ctrl);

	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
	INIT_LIST_HEAD(&ctrl->async_events);
	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);

	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);

	kref_init(&ctrl->ref);
	ctrl->subsys = subsys;

	ctrl->cqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_cq *),
			GFP_KERNEL);
	if (!ctrl->cqs)
		goto out_free_ctrl;

	ctrl->sqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_sq *),
			GFP_KERNEL);
	if (!ctrl->sqs)
		goto out_free_cqs;

	ret = ida_simple_get(&cntlid_ida,
			     NVME_CNTLID_MIN, NVME_CNTLID_MAX,
			     GFP_KERNEL);
	if (ret < 0) {
		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
		goto out_free_sqs;
	}
	ctrl->cntlid = ret;

	ctrl->ops = req->ops;
	if (ctrl->subsys->type == NVME_NQN_DISC) {
		/* Don't accept keep-alive timeout for discovery controllers */
		if (kato) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			goto out_free_sqs;
		}

		/*
		 * Discovery controllers use some arbitrary high value in
		 * order to clean up stale discovery sessions.
		 *
		 * From the latest base diff RC:
		 * "The Keep Alive command is not supported by
		 * Discovery controllers. A transport may specify a
		 * fixed Discovery controller activity timeout value
		 * (e.g., 2 minutes).  If no commands are received
		 * by a Discovery controller within that time
		 * period, the controller may perform the
		 * actions for Keep Alive Timer expiration".
		 */
		ctrl->kato = NVMET_DISC_KATO;
	} else {
		/* keep-alive timeout in seconds */
		ctrl->kato = DIV_ROUND_UP(kato, 1000);
	}
	nvmet_start_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	mutex_unlock(&subsys->lock);

	*ctrlp = ctrl;
	return 0;

out_free_sqs:
	kfree(ctrl->sqs);
out_free_cqs:
	kfree(ctrl->cqs);
out_free_ctrl:
	kfree(ctrl);
out_put_subsystem:
	nvmet_subsys_put(subsys);
out:
	return status;
}

static void nvmet_ctrl_free(struct kref *ref)
{
	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
	struct nvmet_subsys *subsys = ctrl->subsys;

	nvmet_stop_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_del(&ctrl->subsys_entry);
	mutex_unlock(&subsys->lock);

	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fatal_err_work);

	ida_simple_remove(&cntlid_ida, ctrl->cntlid);
	nvmet_subsys_put(subsys);

	kfree(ctrl->sqs);
	kfree(ctrl->cqs);
	kfree(ctrl);
}

void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvmet_ctrl_free);
}

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
	mutex_lock(&ctrl->lock);
	if (!(ctrl->csts & NVME_CSTS_CFS)) {
		ctrl->csts |= NVME_CSTS_CFS;
		schedule_work(&ctrl->fatal_err_work);
	}
	mutex_unlock(&ctrl->lock);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);

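/*
 * Resolve a subsystem NQN for a connect on the given port.  The well-known
 * discovery NQN maps to the global discovery subsystem; any other NQN must
 * be explicitly linked to the port.  A subsystem reference is taken on
 * success.
 */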
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn)
{
	struct nvmet_subsys_link *p;

	if (!port)
		return NULL;

	if (!strncmp(NVME_DISC_SUBSYS_NAME, subsysnqn,
			NVMF_NQN_SIZE)) {
		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
			return NULL;
		return nvmet_disc_subsys;
	}

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
				NVMF_NQN_SIZE)) {
			if (!kref_get_unless_zero(&p->subsys->ref))
				break;
			up_read(&nvmet_config_sem);
			return p->subsys;
		}
	}
	up_read(&nvmet_config_sem);
	return NULL;
}

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type)
{
	struct nvmet_subsys *subsys;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return NULL;

	subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
	/* generate a random serial number as our controllers are ephemeral: */
	get_random_bytes(&subsys->serial, sizeof(subsys->serial));

	switch (type) {
	case NVME_NQN_NVME:
		subsys->max_qid = NVMET_NR_QUEUES;
		break;
	case NVME_NQN_DISC:
		subsys->max_qid = 0;
		break;
	default:
		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
		kfree(subsys);
		return NULL;
	}
	subsys->type = type;
	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
			GFP_KERNEL);
	if (!subsys->subsysnqn) {
		kfree(subsys);
		return NULL;
	}

	kref_init(&subsys->ref);

	mutex_init(&subsys->lock);
	INIT_LIST_HEAD(&subsys->namespaces);
	INIT_LIST_HEAD(&subsys->ctrls);
	INIT_LIST_HEAD(&subsys->hosts);

	return subsys;
}

static void nvmet_subsys_free(struct kref *ref)
{
	struct nvmet_subsys *subsys =
		container_of(ref, struct nvmet_subsys, ref);

	WARN_ON_ONCE(!list_empty(&subsys->namespaces));

	kfree(subsys->subsysnqn);
	kfree(subsys);
}

void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		ctrl->ops->delete_ctrl(ctrl);
	mutex_unlock(&subsys->lock);
}

void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
	kref_put(&subsys->ref, nvmet_subsys_free);
}

static int __init nvmet_init(void)
{
	int error;

	error = nvmet_init_discovery();
	if (error)
		goto out;

	error = nvmet_init_configfs();
	if (error)
		goto out_exit_discovery;
	return 0;

out_exit_discovery:
	nvmet_exit_discovery();
out:
	return error;
}

static void __exit nvmet_exit(void)
{
	nvmet_exit_configfs();
	nvmet_exit_discovery();
	ida_destroy(&cntlid_ida);

	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
}

module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_LICENSE("GPL v2");