GNU Linux-libre 4.19.245-gnu1
[releases.git] / drivers / nvme / target / core.c
1 /*
2  * Common code for the NVMe target.
3  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  */
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/module.h>
16 #include <linux/random.h>
17 #include <linux/rculist.h>
18
19 #include "nvmet.h"
20
21 struct workqueue_struct *buffered_io_wq;
22 static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
23 static DEFINE_IDA(cntlid_ida);
24
25 /*
26  * This read/write semaphore is used to synchronize access to configuration
27  * information on a target system that will result in discovery log page
28  * information change for at least one host.
29  * The full list of resources to be protected by this semaphore is:
30  *
31  *  - subsystems list
32  *  - per-subsystem allowed hosts list
33  *  - allow_any_host subsystem attribute
34  *  - nvmet_genctr
35  *  - the nvmet_transports array
36  *
37  * When updating any of those lists/structures write lock should be obtained,
38  * while when reading (populating discovery log page or checking host-subsystem
39  * link) read lock is obtained to allow concurrent reads.
40  */
41 DECLARE_RWSEM(nvmet_config_sem);
42
43 u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
44 u64 nvmet_ana_chgcnt;
45 DECLARE_RWSEM(nvmet_ana_sem);
46
47 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
48                 const char *subsysnqn);
49
50 u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
51                 size_t len)
52 {
53         if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
54                 return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
55         return 0;
56 }
57
58 u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
59 {
60         if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
61                 return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
62         return 0;
63 }
64
/*
 * Zero @len bytes of the request's scatterlist starting at offset @off.
 * Returns 0 on success or an SGL status code (with DNR set) on a short zero.
 */
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
{
	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}
71
72 static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
73 {
74         struct nvmet_ns *ns;
75
76         if (list_empty(&subsys->namespaces))
77                 return 0;
78
79         ns = list_last_entry(&subsys->namespaces, struct nvmet_ns, dev_link);
80         return ns->nsid;
81 }
82
83 static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
84 {
85         return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
86 }
87
/*
 * Fail all outstanding Async Event Request commands on @ctrl with
 * NVME_SC_INTERNAL.  Each command is popped under ctrl->lock but completed
 * after dropping it, since nvmet_req_complete() must not run under the lock.
 */
static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		if (!ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
	}
}
104
/*
 * Deliver queued async events to the host: pair each pending event with an
 * outstanding AER command until either list runs dry.  The AEN is consumed
 * (unlinked and freed) under ctrl->lock; the paired request is completed
 * after the lock is dropped.
 */
static void nvmet_async_event_work(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, async_event_work);
	struct nvmet_async_event *aen;
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		aen = list_first_entry_or_null(&ctrl->async_events,
				struct nvmet_async_event, entry);
		/* stop when out of events or out of AER commands to answer */
		if (!aen || !ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		nvmet_set_result(req, nvmet_async_event_result(aen));

		list_del(&aen->entry);
		kfree(aen);

		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, 0);
	}
}
131
/*
 * Queue an async event on @ctrl and kick the work item that pairs it with
 * an outstanding AER command.  On allocation failure the event is silently
 * dropped (AENs are best-effort notifications).
 */
static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
{
	struct nvmet_async_event *aen;

	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
	if (!aen)
		return;

	aen->event_type = event_type;
	aen->event_info = event_info;
	aen->log_page = log_page;

	mutex_lock(&ctrl->lock);
	list_add_tail(&aen->entry, &ctrl->async_events);
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}
151
/*
 * Returns true if AEN @aen must not be sent: either the host has not enabled
 * it via Set Features, or one of this type is already outstanding (the
 * test_and_set_bit also marks it outstanding when it was clear).
 */
static bool nvmet_aen_disabled(struct nvmet_ctrl *ctrl, u32 aen)
{
	if (!(READ_ONCE(ctrl->aen_enabled) & aen))
		return true;
	return test_and_set_bit(aen, &ctrl->aen_masked);
}
158
/*
 * Record @nsid in the controller's Changed Namespace List log, ignoring
 * duplicates.  Once the list would exceed NVME_MAX_CHANGED_NAMESPACES the
 * first entry is replaced by the 0xffffffff "too many" marker and
 * nr_changed_ns is set to U32_MAX so later calls bail out early.
 */
static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	u32 i;

	mutex_lock(&ctrl->lock);
	/* already overflowed: nothing more to record */
	if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
		goto out_unlock;

	for (i = 0; i < ctrl->nr_changed_ns; i++) {
		if (ctrl->changed_ns_list[i] == nsid)
			goto out_unlock;
	}

	if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
		ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
		ctrl->nr_changed_ns = U32_MAX;
		goto out_unlock;
	}

	ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
out_unlock:
	mutex_unlock(&ctrl->lock);
}
182
/*
 * Notify every controller on @subsys that namespace @nsid changed: log it in
 * each controller's Changed Namespace List and, where the NS_ATTR AEN is
 * enabled and not already outstanding, fire the async event.
 * NOTE(review): iterates subsys->ctrls without taking subsys->lock itself;
 * the callers visible in this file hold it — confirm for any new caller.
 */
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ctrl *ctrl;

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
		if (nvmet_aen_disabled(ctrl, NVME_AEN_CFG_NS_ATTR))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
				NVME_AER_NOTICE_NS_CHANGED,
				NVME_LOG_CHANGED_NS);
	}
}
196
/*
 * Send an ANA change async event to every controller on @subsys.  When
 * @port is non-NULL only controllers connected through that port are
 * notified; controllers with the ANA AEN disabled or already outstanding
 * are skipped.
 */
void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (port && ctrl->port != port)
			continue;
		if (nvmet_aen_disabled(ctrl, NVME_AEN_CFG_ANA_CHANGE))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
				NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
	}
	mutex_unlock(&subsys->lock);
}
213
/*
 * Send an ANA change event for every subsystem exported on @port.  Takes
 * nvmet_config_sem for reading to walk the port's subsystem links safely.
 */
void nvmet_port_send_ana_event(struct nvmet_port *port)
{
	struct nvmet_subsys_link *p;

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry)
		nvmet_send_ana_event(p->subsys, port);
	up_read(&nvmet_config_sem);
}
223
224 int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
225 {
226         int ret = 0;
227
228         down_write(&nvmet_config_sem);
229         if (nvmet_transports[ops->type])
230                 ret = -EINVAL;
231         else
232                 nvmet_transports[ops->type] = ops;
233         up_write(&nvmet_config_sem);
234
235         return ret;
236 }
237 EXPORT_SYMBOL_GPL(nvmet_register_transport);
238
/* Remove a fabrics transport from the table under the config semaphore. */
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
{
	down_write(&nvmet_config_sem);
	nvmet_transports[ops->type] = NULL;
	up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
246
/*
 * Enable a port: look up its transport (autoloading the module if needed),
 * pin the transport module, and call its add_port callback.
 * Caller must hold nvmet_config_sem for writing.
 */
int nvmet_enable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;
	int ret;

	lockdep_assert_held(&nvmet_config_sem);

	ops = nvmet_transports[port->disc_addr.trtype];
	if (!ops) {
		/*
		 * Drop the semaphore while request_module() sleeps, then
		 * re-check the table: module init registers the transport.
		 */
		up_write(&nvmet_config_sem);
		request_module("nvmet-transport-%d", port->disc_addr.trtype);
		down_write(&nvmet_config_sem);
		ops = nvmet_transports[port->disc_addr.trtype];
		if (!ops) {
			pr_err("transport type %d not supported\n",
				port->disc_addr.trtype);
			return -EINVAL;
		}
	}

	/* hold a module reference for as long as the port is enabled */
	if (!try_module_get(ops->owner))
		return -EINVAL;

	ret = ops->add_port(port);
	if (ret) {
		module_put(ops->owner);
		return ret;
	}

	/* If the transport didn't set inline_data_size, then disable it. */
	if (port->inline_data_size < 0)
		port->inline_data_size = 0;

	port->enabled = true;
	return 0;
}
283
/*
 * Disable a port: call the transport's remove_port callback and drop the
 * module reference taken in nvmet_enable_port().
 * Caller must hold nvmet_config_sem for writing.
 */
void nvmet_disable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;

	lockdep_assert_held(&nvmet_config_sem);

	port->enabled = false;

	ops = nvmet_transports[port->disc_addr.trtype];
	ops->remove_port(port);
	module_put(ops->owner);
}
296
/*
 * Keep-alive expiry handler: the host failed to send a Keep Alive command
 * within KATO, so treat the controller as dead and tear it down.
 */
static void nvmet_keep_alive_timer(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvmet_ctrl, ka_work);

	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
		ctrl->cntlid, ctrl->kato);

	nvmet_ctrl_fatal_error(ctrl);
}
307
/*
 * Arm the keep-alive watchdog for @ctrl.  A KATO of 0 means the host
 * disabled keep-alive, so no timer is started.
 */
static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}
319
/* Cancel the keep-alive watchdog, waiting for a running expiry to finish. */
static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

	cancel_delayed_work_sync(&ctrl->ka_work);
}
329
330 static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
331                 __le32 nsid)
332 {
333         struct nvmet_ns *ns;
334
335         list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
336                 if (ns->nsid == le32_to_cpu(nsid))
337                         return ns;
338         }
339
340         return NULL;
341 }
342
/*
 * Look up a namespace by NSID and take a percpu reference on it.
 * Returns NULL if no such namespace exists.  The caller must drop the
 * reference with nvmet_put_namespace().
 */
struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	struct nvmet_ns *ns;

	rcu_read_lock();
	ns = __nvmet_find_namespace(ctrl, nsid);
	if (ns)
		percpu_ref_get(&ns->ref);
	rcu_read_unlock();

	return ns;
}
355
/*
 * percpu_ref release callback: the last reference is gone, wake up
 * nvmet_ns_disable() which waits on disable_done.
 */
static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

	complete(&ns->disable_done);
}
362
/* Drop a reference taken by nvmet_find_namespace(). */
void nvmet_put_namespace(struct nvmet_ns *ns)
{
	percpu_ref_put(&ns->ref);
}
367
/* Release the backing device, whichever kind (bdev or file) was enabled. */
static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
{
	nvmet_bdev_ns_disable(ns);
	nvmet_file_ns_disable(ns);
}
373
/*
 * Bring a namespace online: attach the backing store (block device first,
 * falling back to file on -ENOTBLK), initialize its percpu ref, insert it
 * into the subsystem's NSID-sorted namespace list, and notify controllers.
 * Returns 0 on success (or if already enabled), -EMFILE when the subsystem
 * is full, or the backing-store/ref-init error.
 */
int nvmet_ns_enable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	int ret;

	mutex_lock(&subsys->lock);
	ret = -EMFILE;
	if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
		goto out_unlock;
	ret = 0;
	/* enabling twice is a no-op */
	if (ns->enabled)
		goto out_unlock;

	ret = nvmet_bdev_ns_enable(ns);
	if (ret == -ENOTBLK)
		ret = nvmet_file_ns_enable(ns);
	if (ret)
		goto out_unlock;

	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
				0, GFP_KERNEL);
	if (ret)
		goto out_dev_put;

	if (ns->nsid > subsys->max_nsid)
		subsys->max_nsid = ns->nsid;

	/*
	 * The namespaces list needs to be sorted to simplify the
	 * implementation of the Identify Namespace List subcommand.
	 */
	if (list_empty(&subsys->namespaces)) {
		list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
	} else {
		struct nvmet_ns *old;

		/* find the first entry with a larger NSID, insert before it */
		list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
			BUG_ON(ns->nsid == old->nsid);
			if (ns->nsid < old->nsid)
				break;
		}

		list_add_tail_rcu(&ns->dev_link, &old->dev_link);
	}
	subsys->nr_namespaces++;

	nvmet_ns_changed(subsys, ns->nsid);
	ns->enabled = true;
	ret = 0;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
out_dev_put:
	nvmet_ns_dev_disable(ns);
	goto out_unlock;
}
430
/*
 * Take a namespace offline: unlink it from the lookup list, wait for all
 * outstanding references and RCU readers to drain, then release the
 * backing device and notify controllers.  Safe to call on an already
 * disabled namespace.
 */
void nvmet_ns_disable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;

	mutex_lock(&subsys->lock);
	if (!ns->enabled)
		goto out_unlock;

	ns->enabled = false;
	list_del_rcu(&ns->dev_link);
	if (ns->nsid == subsys->max_nsid)
		subsys->max_nsid = nvmet_max_nsid(subsys);
	mutex_unlock(&subsys->lock);

	/*
	 * Now that we removed the namespaces from the lookup list, we
	 * can kill the per_cpu ref and wait for any remaining references
	 * to be dropped, as well as a RCU grace period for anyone only
	 * using the namespace under rcu_read_lock().  Note that we can't
	 * use call_rcu here as we need to ensure the namespaces have
	 * been fully destroyed before unloading the module.
	 */
	percpu_ref_kill(&ns->ref);
	synchronize_rcu();
	/* nvmet_destroy_namespace() signals this when the last ref drops */
	wait_for_completion(&ns->disable_done);
	percpu_ref_exit(&ns->ref);

	mutex_lock(&subsys->lock);
	subsys->nr_namespaces--;
	nvmet_ns_changed(subsys, ns->nsid);
	nvmet_ns_dev_disable(ns);
out_unlock:
	mutex_unlock(&subsys->lock);
}
465
/*
 * Free a namespace: disable it first, drop its ANA group membership count,
 * then release its memory.
 */
void nvmet_ns_free(struct nvmet_ns *ns)
{
	nvmet_ns_disable(ns);

	down_write(&nvmet_ana_sem);
	nvmet_ana_group_enabled[ns->anagrpid]--;
	up_write(&nvmet_ana_sem);

	kfree(ns->device_path);
	kfree(ns);
}
477
/*
 * Allocate a namespace object for @subsys with the given NSID, place it in
 * the default ANA group, and give it a fresh UUID.  The namespace is not
 * enabled; returns NULL on allocation failure.
 */
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ns *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;

	INIT_LIST_HEAD(&ns->dev_link);
	init_completion(&ns->disable_done);

	ns->nsid = nsid;
	ns->subsys = subsys;

	down_write(&nvmet_ana_sem);
	ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
	nvmet_ana_group_enabled[ns->anagrpid]++;
	up_write(&nvmet_ana_sem);

	uuid_gen(&ns->uuid);
	ns->buffered_io = false;

	return ns;
}
502
/*
 * Fill in the completion queue entry for @req (status, SQ head, SQ id,
 * command id), drop the namespace reference if one was taken, and hand the
 * response to the transport.  Does not drop the SQ reference — that is
 * nvmet_req_complete()'s job.
 */
static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	u32 old_sqhd, new_sqhd;
	u16 sqhd;

	if (status)
		nvmet_set_status(req, status);

	/* lock-free advance of the SQ head pointer, modulo queue size */
	if (req->sq->size) {
		do {
			old_sqhd = req->sq->sqhd;
			new_sqhd = (old_sqhd + 1) % req->sq->size;
		} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
					old_sqhd);
	}
	sqhd = req->sq->sqhd & 0x0000FFFF;
	req->rsp->sq_head = cpu_to_le16(sqhd);
	req->rsp->sq_id = cpu_to_le16(req->sq->qid);
	req->rsp->command_id = req->cmd->common.command_id;

	if (req->ns)
		nvmet_put_namespace(req->ns);
	req->ops->queue_response(req);
}
527
/*
 * Complete @req with @status and release the SQ reference taken in
 * nvmet_req_init().
 */
void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	__nvmet_req_complete(req, status);
	percpu_ref_put(&req->sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);
534
/* Initialize a completion queue and register it in the controller's table. */
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
		u16 qid, u16 size)
{
	cq->qid = qid;
	cq->size = size;

	ctrl->cqs[qid] = cq;
}
543
/* Initialize a submission queue and register it in the controller's table. */
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		u16 qid, u16 size)
{
	sq->sqhd = 0;
	sq->qid = qid;
	sq->size = size;

	ctrl->sqs[qid] = sq;
}
553
/*
 * percpu_ref kill-confirm callback: the ref has switched to atomic mode,
 * so no new requests can get a reference.  Wake nvmet_sq_destroy().
 */
static void nvmet_confirm_sq(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->confirm_done);
}
560
/*
 * Tear down a submission queue: fail outstanding AERs (admin queue only),
 * kill the percpu ref, wait until the kill is confirmed and the last
 * request reference is dropped, then release the controller reference.
 */
void nvmet_sq_destroy(struct nvmet_sq *sq)
{
	/*
	 * If this is the admin queue, complete all AERs so that our
	 * queue doesn't have outstanding requests on it.
	 */
	if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
		nvmet_async_events_free(sq->ctrl);
	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
	wait_for_completion(&sq->confirm_done);
	/* signalled by nvmet_sq_free() when the last request finishes */
	wait_for_completion(&sq->free_done);
	percpu_ref_exit(&sq->ref);

	if (sq->ctrl) {
		nvmet_ctrl_put(sq->ctrl);
		sq->ctrl = NULL; /* allows reusing the queue later */
	}
}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);
580
/* percpu_ref release callback: last request reference gone, wake destroy. */
static void nvmet_sq_free(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->free_done);
}
587
/*
 * Initialize the per-SQ request reference counter and the completions used
 * during teardown.  Returns 0 or the percpu_ref_init() error.
 */
int nvmet_sq_init(struct nvmet_sq *sq)
{
	int ret;

	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
	if (ret) {
		pr_err("percpu_ref init failed!\n");
		return ret;
	}
	init_completion(&sq->free_done);
	init_completion(&sq->confirm_done);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);
603
604 static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
605                 struct nvmet_ns *ns)
606 {
607         enum nvme_ana_state state = port->ana_state[ns->anagrpid];
608
609         if (unlikely(state == NVME_ANA_INACCESSIBLE))
610                 return NVME_SC_ANA_INACCESSIBLE;
611         if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
612                 return NVME_SC_ANA_PERSISTENT_LOSS;
613         if (unlikely(state == NVME_ANA_CHANGE))
614                 return NVME_SC_ANA_TRANSITION;
615         return 0;
616 }
617
/*
 * Reject writes to a read-only namespace.  Only reads and flushes are
 * allowed when req->ns->readonly is set; anything else gets
 * NVME_SC_NS_WRITE_PROTECTED.
 */
static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
{
	if (unlikely(req->ns->readonly)) {
		switch (req->cmd->common.opcode) {
		case nvme_cmd_read:
		case nvme_cmd_flush:
			break;
		default:
			return NVME_SC_NS_WRITE_PROTECTED;
		}
	}

	return 0;
}
632
/*
 * Validate and dispatch an I/O command: check controller state, resolve the
 * target namespace (taking a reference that is dropped at completion),
 * check ANA state and write protection, then hand off to the file- or
 * bdev-backed parser depending on the namespace's backing store.
 */
static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret))
		return ret;

	req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
	if (unlikely(!req->ns))
		return NVME_SC_INVALID_NS | NVME_SC_DNR;
	ret = nvmet_check_ana_state(req->port, req->ns);
	if (unlikely(ret))
		return ret;
	ret = nvmet_io_cmd_check_access(req);
	if (unlikely(ret))
		return ret;

	if (req->ns->file)
		return nvmet_file_parse_io_cmd(req);
	else
		return nvmet_bdev_parse_io_cmd(req);
}
657
/*
 * Initialize a request received from the transport and parse its command.
 * On success takes a reference on the SQ (dropped in nvmet_req_complete /
 * nvmet_req_uninit) and returns true.  On any failure the request is
 * completed immediately with an error status and false is returned — the
 * transport must not execute it.
 */
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
{
	u8 flags = req->cmd->common.flags;
	u16 status;

	req->cq = cq;
	req->sq = sq;
	req->ops = ops;
	req->sg = NULL;
	req->sg_cnt = 0;
	req->transfer_len = 0;
	req->rsp->status = 0;
	req->ns = NULL;

	/* no support for fused commands yet */
	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	/*
	 * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
	 * contains an address of a single contiguous physical buffer that is
	 * byte aligned.
	 */
	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (unlikely(!req->sq->ctrl))
		/* will return an error for any Non-connect command: */
		status = nvmet_parse_connect_cmd(req);
	else if (likely(req->sq->qid != 0))
		status = nvmet_parse_io_cmd(req);
	else if (req->cmd->common.opcode == nvme_fabrics_command)
		status = nvmet_parse_fabrics_cmd(req);
	else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
		status = nvmet_parse_discovery_cmd(req);
	else
		status = nvmet_parse_admin_cmd(req);

	if (status)
		goto fail;

	/* SQ may already be shutting down; refuse the request if so */
	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	return true;

fail:
	__nvmet_req_complete(req, status);
	return false;
}
EXPORT_SYMBOL_GPL(nvmet_req_init);
716
/*
 * Undo nvmet_req_init() for a request that will not be completed normally:
 * drop the SQ reference and any namespace reference.
 */
void nvmet_req_uninit(struct nvmet_req *req)
{
	percpu_ref_put(&req->sq->ref);
	if (req->ns)
		nvmet_put_namespace(req->ns);
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);
724
/*
 * Run the request's execute handler once the transport has set up the data
 * buffer.  A mismatch between the command's expected data length and what
 * the transport actually mapped is completed as an SGL error instead.
 */
void nvmet_req_execute(struct nvmet_req *req)
{
	if (unlikely(req->data_len != req->transfer_len))
		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
	else
		req->execute(req);
}
EXPORT_SYMBOL_GPL(nvmet_req_execute);
733
/* Extract CC.EN (controller enable, 1 bit). */
static inline bool nvmet_cc_en(u32 cc)
{
	return (cc >> NVME_CC_EN_SHIFT) & 0x1;
}
738
/* Extract CC.CSS (I/O command set selected, 3 bits). */
static inline u8 nvmet_cc_css(u32 cc)
{
	return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
}
743
/* Extract CC.MPS (memory page size, 4 bits). */
static inline u8 nvmet_cc_mps(u32 cc)
{
	return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
}
748
/* Extract CC.AMS (arbitration mechanism selected, 3 bits). */
static inline u8 nvmet_cc_ams(u32 cc)
{
	return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
}
753
/* Extract CC.SHN (shutdown notification, 2 bits). */
static inline u8 nvmet_cc_shn(u32 cc)
{
	return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
}
758
/* Extract CC.IOSQES (I/O SQ entry size as a power of two, 4 bits). */
static inline u8 nvmet_cc_iosqes(u32 cc)
{
	return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
}
763
/* Extract CC.IOCQES (I/O CQ entry size as a power of two, 4 bits). */
static inline u8 nvmet_cc_iocqes(u32 cc)
{
	return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
}
768
/*
 * Handle the CC.EN 0->1 transition: validate the CC fields the target
 * supports and either set CSTS.RDY or flag a controller fatal status
 * (CSTS.CFS).  Caller must hold ctrl->lock.
 */
static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/*
	 * Only I/O controllers should verify iosqes,iocqes.
	 * Strictly speaking, the spec says a discovery controller
	 * should verify iosqes,iocqes are zeroed, however that
	 * would break backwards compatibility, so don't enforce it.
	 */
	if (ctrl->subsys->type != NVME_NQN_DISC &&
	    (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
	     nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	/* only MPS 0, AMS round-robin and the NVM command set are supported */
	if (nvmet_cc_mps(ctrl->cc) != 0 ||
	    nvmet_cc_ams(ctrl->cc) != 0 ||
	    nvmet_cc_css(ctrl->cc) != 0) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	ctrl->csts = NVME_CSTS_RDY;

	/*
	 * Controllers that are not yet enabled should not really enforce the
	 * keep alive timeout, but we still want to track a timeout and cleanup
	 * in case a host died before it enabled the controller.  Hence, simply
	 * reset the keep alive timer when the controller is enabled.
	 */
	if (ctrl->kato)
		mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
}
804
/*
 * Handle CC.EN being cleared: drop CSTS.RDY and reset CC.
 * Caller must hold ctrl->lock.
 */
static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/* XXX: tear down queues? */
	ctrl->csts &= ~NVME_CSTS_RDY;
	ctrl->cc = 0;
}
813
/*
 * Apply a host write to the CC property: detect EN and SHN transitions
 * against the previous value and start, clear, or shut down the controller
 * accordingly, updating CSTS to match.
 */
void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
{
	u32 old;

	mutex_lock(&ctrl->lock);
	old = ctrl->cc;
	ctrl->cc = new;

	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
		nvmet_start_ctrl(ctrl);
	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
		nvmet_clear_ctrl(ctrl);
	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
		/* shutdown requested: clear controller, report complete */
		nvmet_clear_ctrl(ctrl);
		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
	}
	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
	mutex_unlock(&ctrl->lock);
}
834
/* Build the controller's CAP property value. */
static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
	/* command sets supported: NVMe command set: */
	ctrl->cap = (1ULL << 37);
	/* CC.EN timeout in 500msec units: */
	ctrl->cap |= (15ULL << 24);
	/* maximum queue entries supported (MQES, zero's based): */
	ctrl->cap |= NVMET_QUEUE_SIZE - 1;
}
844
/*
 * Look up an existing controller by subsystem NQN and controller ID for a
 * Connect to an I/O queue, verify the host NQN matches, and take a
 * reference on it (returned via @ret).  Returns 0 on success or a Connect
 * Invalid Parameter status with the offending-field offset in the result.
 */
u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	u16 status = 0;

	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	}

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->cntlid == cntlid) {
			/* same cntlid but different host: keep looking */
			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
				pr_warn("hostnqn mismatch.\n");
				continue;
			}
			/* controller may be going away; skip if ref is dead */
			if (!kref_get_unless_zero(&ctrl->ref))
				continue;

			*ret = ctrl;
			goto out;
		}
	}

	pr_warn("could not find controller %d for subsys %s / host %s\n",
		cntlid, subsysnqn, hostnqn);
	req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;

out:
	mutex_unlock(&subsys->lock);
	nvmet_subsys_put(subsys);
	return status;
}
885
/*
 * Reject commands arriving before the controller is enabled (CC.EN) and
 * ready (CSTS.RDY).  Returns 0 when the controller may process commands.
 */
u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
{
	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
		pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
		       cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
		       cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}
	return 0;
}
901
902 static bool __nvmet_host_allowed(struct nvmet_subsys *subsys,
903                 const char *hostnqn)
904 {
905         struct nvmet_host_link *p;
906
907         if (subsys->allow_any_host)
908                 return true;
909
910         list_for_each_entry(p, &subsys->hosts, entry) {
911                 if (!strcmp(nvmet_host_name(p->host), hostnqn))
912                         return true;
913         }
914
915         return false;
916 }
917
/*
 * A host may talk to the discovery controller on a port if it is allowed
 * on at least one subsystem exported through that port.
 */
static bool nvmet_host_discovery_allowed(struct nvmet_req *req,
		const char *hostnqn)
{
	struct nvmet_subsys_link *s;

	list_for_each_entry(s, &req->port->subsystems, entry) {
		if (__nvmet_host_allowed(s->subsys, hostnqn))
			return true;
	}

	return false;
}
930
/*
 * Access-control entry point: discovery subsystems defer to the per-port
 * check, regular subsystems to their own allowed-hosts list.
 * Caller must hold nvmet_config_sem.
 */
bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
		const char *hostnqn)
{
	lockdep_assert_held(&nvmet_config_sem);

	if (subsys->type == NVME_NQN_DISC)
		return nvmet_host_discovery_allowed(req, hostnqn);
	else
		return __nvmet_host_allowed(subsys, hostnqn);
}
941
/*
 * Work item run after a controller fatal error: delete the controller via
 * the transport's delete_ctrl callback (done from a workqueue since the
 * error may be raised from contexts that cannot tear down the controller).
 */
static void nvmet_fatal_error_handler(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
			container_of(work, struct nvmet_ctrl, fatal_err_work);

	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
	ctrl->ops->delete_ctrl(ctrl);
}
950
/*
 * Allocate and initialize a controller for a fabrics connect request.
 *
 * On success returns 0 and stores the new controller in *@ctrlp; the
 * controller holds a reference on its subsystem (dropped again in
 * nvmet_ctrl_free()).  On failure returns an NVMe status code and, for
 * connect-specific failures, sets req->rsp->result.u32 to point at the
 * offending connect-data attribute.
 *
 * @kato is the requested keep-alive timeout in milliseconds; discovery
 * controllers must not specify one.
 */
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	int ret;
	u16 status;

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	/* takes a subsystem reference on success */
	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		goto out;
	}

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	/* read lock: host allow lists are protected by nvmet_config_sem */
	down_read(&nvmet_config_sem);
	if (!nvmet_host_allowed(req, subsys, hostnqn)) {
		pr_info("connect by host %s for subsystem %s not allowed\n",
			hostnqn, subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
		up_read(&nvmet_config_sem);
		status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
		goto out_put_subsystem;
	}
	up_read(&nvmet_config_sem);

	status = NVME_SC_INTERNAL;
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		goto out_put_subsystem;
	mutex_init(&ctrl->lock);

	nvmet_init_cap(ctrl);

	ctrl->port = req->port;

	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
	INIT_LIST_HEAD(&ctrl->async_events);
	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);

	/*
	 * NOTE(review): assumes both NQN buffers passed in are at least
	 * NVMF_NQN_SIZE bytes (they come from fixed-size connect data) —
	 * confirm against the fabrics connect handler.
	 */
	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);

	kref_init(&ctrl->ref);
	ctrl->subsys = subsys;
	WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);

	ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
			sizeof(__le32), GFP_KERNEL);
	if (!ctrl->changed_ns_list)
		goto out_free_ctrl;

	/* qid 0 (admin queue) plus max_qid I/O queues */
	ctrl->cqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_cq *),
			GFP_KERNEL);
	if (!ctrl->cqs)
		goto out_free_changed_ns_list;

	ctrl->sqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_sq *),
			GFP_KERNEL);
	if (!ctrl->sqs)
		goto out_free_cqs;

	/* allocate a controller ID in the spec-defined dynamic range */
	ret = ida_simple_get(&cntlid_ida,
			     NVME_CNTLID_MIN, NVME_CNTLID_MAX,
			     GFP_KERNEL);
	if (ret < 0) {
		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
		goto out_free_sqs;
	}
	ctrl->cntlid = ret;

	ctrl->ops = req->ops;
	if (ctrl->subsys->type == NVME_NQN_DISC) {
		/* Don't accept keep-alive timeout for discovery controllers */
		if (kato) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			goto out_remove_ida;
		}

		/*
		 * Discovery controllers use some arbitrary high value in order
		 * to cleanup stale discovery sessions
		 *
		 * From the latest base diff RC:
		 * "The Keep Alive command is not supported by
		 * Discovery controllers. A transport may specify a
		 * fixed Discovery controller activity timeout value
		 * (e.g., 2 minutes).  If no commands are received
		 * by a Discovery controller within that time
		 * period, the controller may perform the
		 * actions for Keep Alive Timer expiration".
		 */
		ctrl->kato = NVMET_DISC_KATO;
	} else {
		/* keep-alive timeout in seconds */
		ctrl->kato = DIV_ROUND_UP(kato, 1000);
	}
	nvmet_start_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	mutex_unlock(&subsys->lock);

	*ctrlp = ctrl;
	return 0;

	/* error unwind: strictly the reverse of the allocation order above */
out_remove_ida:
	ida_simple_remove(&cntlid_ida, ctrl->cntlid);
out_free_sqs:
	kfree(ctrl->sqs);
out_free_cqs:
	kfree(ctrl->cqs);
out_free_changed_ns_list:
	kfree(ctrl->changed_ns_list);
out_free_ctrl:
	kfree(ctrl);
out_put_subsystem:
	nvmet_subsys_put(subsys);
out:
	return status;
}
1077
/*
 * kref release callback: tear a controller down in the reverse order of
 * nvmet_alloc_ctrl().
 */
static void nvmet_ctrl_free(struct kref *ref)
{
	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
	struct nvmet_subsys *subsys = ctrl->subsys;

	/* unlink from the subsystem's controller list first */
	mutex_lock(&subsys->lock);
	list_del(&ctrl->subsys_entry);
	mutex_unlock(&subsys->lock);

	nvmet_stop_keep_alive_timer(ctrl);

	/* make sure no pending work item still references ctrl before freeing */
	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fatal_err_work);

	ida_simple_remove(&cntlid_ida, ctrl->cntlid);

	kfree(ctrl->sqs);
	kfree(ctrl->cqs);
	kfree(ctrl->changed_ns_list);
	kfree(ctrl);

	/* drop the subsystem reference taken by nvmet_find_get_subsys() */
	nvmet_subsys_put(subsys);
}
1101
/* Drop a controller reference; frees the controller on the last put. */
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvmet_ctrl_free);
}
1106
/*
 * Report a fatal controller error.  CSTS.CFS acts as a once-only latch
 * under ctrl->lock, so the fatal-error work is scheduled at most once per
 * controller lifetime even if transports report multiple errors.
 */
void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
	mutex_lock(&ctrl->lock);
	if (!(ctrl->csts & NVME_CSTS_CFS)) {
		ctrl->csts |= NVME_CSTS_CFS;
		schedule_work(&ctrl->fatal_err_work);
	}
	mutex_unlock(&ctrl->lock);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
1117
1118 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
1119                 const char *subsysnqn)
1120 {
1121         struct nvmet_subsys_link *p;
1122
1123         if (!port)
1124                 return NULL;
1125
1126         if (!strncmp(NVME_DISC_SUBSYS_NAME, subsysnqn,
1127                         NVMF_NQN_SIZE)) {
1128                 if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
1129                         return NULL;
1130                 return nvmet_disc_subsys;
1131         }
1132
1133         down_read(&nvmet_config_sem);
1134         list_for_each_entry(p, &port->subsystems, entry) {
1135                 if (!strncmp(p->subsys->subsysnqn, subsysnqn,
1136                                 NVMF_NQN_SIZE)) {
1137                         if (!kref_get_unless_zero(&p->subsys->ref))
1138                                 break;
1139                         up_read(&nvmet_config_sem);
1140                         return p->subsys;
1141                 }
1142         }
1143         up_read(&nvmet_config_sem);
1144         return NULL;
1145 }
1146
1147 struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
1148                 enum nvme_subsys_type type)
1149 {
1150         struct nvmet_subsys *subsys;
1151
1152         subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
1153         if (!subsys)
1154                 return NULL;
1155
1156         subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
1157         /* generate a random serial number as our controllers are ephemeral: */
1158         get_random_bytes(&subsys->serial, sizeof(subsys->serial));
1159
1160         switch (type) {
1161         case NVME_NQN_NVME:
1162                 subsys->max_qid = NVMET_NR_QUEUES;
1163                 break;
1164         case NVME_NQN_DISC:
1165                 subsys->max_qid = 0;
1166                 break;
1167         default:
1168                 pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
1169                 kfree(subsys);
1170                 return NULL;
1171         }
1172         subsys->type = type;
1173         subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
1174                         GFP_KERNEL);
1175         if (!subsys->subsysnqn) {
1176                 kfree(subsys);
1177                 return NULL;
1178         }
1179
1180         kref_init(&subsys->ref);
1181
1182         mutex_init(&subsys->lock);
1183         INIT_LIST_HEAD(&subsys->namespaces);
1184         INIT_LIST_HEAD(&subsys->ctrls);
1185         INIT_LIST_HEAD(&subsys->hosts);
1186
1187         return subsys;
1188 }
1189
1190 static void nvmet_subsys_free(struct kref *ref)
1191 {
1192         struct nvmet_subsys *subsys =
1193                 container_of(ref, struct nvmet_subsys, ref);
1194
1195         WARN_ON_ONCE(!list_empty(&subsys->namespaces));
1196
1197         kfree(subsys->subsysnqn);
1198         kfree(subsys);
1199 }
1200
/*
 * Request deletion of every controller attached to @subsys.
 *
 * NOTE(review): delete_ctrl is called with subsys->lock held while
 * iterating the list; this presumably relies on the transport deferring
 * actual teardown (controllers unlink themselves later in
 * nvmet_ctrl_free(), which takes the same lock) — confirm against the
 * transport delete_ctrl implementations.
 */
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		ctrl->ops->delete_ctrl(ctrl);
	mutex_unlock(&subsys->lock);
}
1210
/* Drop a subsystem reference; frees the subsystem on the last put. */
void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
	kref_put(&subsys->ref, nvmet_subsys_free);
}
1215
1216 static int __init nvmet_init(void)
1217 {
1218         int error;
1219
1220         nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;
1221
1222         buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
1223                         WQ_MEM_RECLAIM, 0);
1224         if (!buffered_io_wq) {
1225                 error = -ENOMEM;
1226                 goto out;
1227         }
1228
1229         error = nvmet_init_discovery();
1230         if (error)
1231                 goto out_free_work_queue;
1232
1233         error = nvmet_init_configfs();
1234         if (error)
1235                 goto out_exit_discovery;
1236         return 0;
1237
1238 out_exit_discovery:
1239         nvmet_exit_discovery();
1240 out_free_work_queue:
1241         destroy_workqueue(buffered_io_wq);
1242 out:
1243         return error;
1244 }
1245
/*
 * Module exit: tear down in reverse of nvmet_init(), then release the
 * controller-ID allocator and the buffered-I/O workqueue.
 */
static void __exit nvmet_exit(void)
{
	nvmet_exit_configfs();
	nvmet_exit_discovery();
	ida_destroy(&cntlid_ida);
	destroy_workqueue(buffered_io_wq);

	/* compile-time checks of the on-the-wire discovery log page layout */
	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
}
1256
/* Module registration for the NVMe target core. */
module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_LICENSE("GPL v2");