GNU Linux-libre 4.19.211-gnu1
drivers/nvme/host/core.c
1 /*
2  * NVM Express device driver
3  * Copyright (c) 2011-2014, Intel Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  */
14
15 #include <linux/blkdev.h>
16 #include <linux/blk-mq.h>
17 #include <linux/delay.h>
18 #include <linux/errno.h>
19 #include <linux/hdreg.h>
20 #include <linux/kernel.h>
21 #include <linux/module.h>
22 #include <linux/list_sort.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/pr.h>
26 #include <linux/ptrace.h>
27 #include <linux/nvme_ioctl.h>
28 #include <linux/t10-pi.h>
29 #include <linux/pm_qos.h>
30 #include <asm/unaligned.h>
31
32 #define CREATE_TRACE_POINTS
33 #include "trace.h"
34
35 #include "nvme.h"
36 #include "fabrics.h"
37
38 #define NVME_MINORS             (1U << MINORBITS)
39
40 unsigned int admin_timeout = 60;
41 module_param(admin_timeout, uint, 0644);
42 MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
43 EXPORT_SYMBOL_GPL(admin_timeout);
44
45 unsigned int nvme_io_timeout = 30;
46 module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
47 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
48 EXPORT_SYMBOL_GPL(nvme_io_timeout);
49
50 static unsigned char shutdown_timeout = 5;
51 module_param(shutdown_timeout, byte, 0644);
52 MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
53
54 static u8 nvme_max_retries = 5;
55 module_param_named(max_retries, nvme_max_retries, byte, 0644);
56 MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
57
58 static unsigned long default_ps_max_latency_us = 100000;
59 module_param(default_ps_max_latency_us, ulong, 0644);
60 MODULE_PARM_DESC(default_ps_max_latency_us,
61                  "max power saving latency for new devices; use PM QOS to change per device");
62
63 static bool force_apst;
64 module_param(force_apst, bool, 0644);
65 MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");
66
67 static bool streams;
68 module_param(streams, bool, 0644);
69 MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
70
71 /*
72  * nvme_wq - hosts nvme related works that are not reset or delete
73  * nvme_reset_wq - hosts nvme reset works
74  * nvme_delete_wq - hosts nvme delete works
75  *
76  * nvme_wq will host works such as scan, aen handling, fw activation,
77  * keep-alive error recovery, periodic reconnects etc. nvme_reset_wq
78  * runs reset works which also flush works hosted on nvme_wq for
79  * serialization purposes. nvme_delete_wq hosts controller deletion
80  * works which flush reset works for serialization.
81  */
82 struct workqueue_struct *nvme_wq;
83 EXPORT_SYMBOL_GPL(nvme_wq);
84
85 struct workqueue_struct *nvme_reset_wq;
86 EXPORT_SYMBOL_GPL(nvme_reset_wq);
87
88 struct workqueue_struct *nvme_delete_wq;
89 EXPORT_SYMBOL_GPL(nvme_delete_wq);
90
91 static DEFINE_IDA(nvme_subsystems_ida);
92 static LIST_HEAD(nvme_subsystems);
93 static DEFINE_MUTEX(nvme_subsystems_lock);
94
95 static DEFINE_IDA(nvme_instance_ida);
96 static dev_t nvme_chr_devt;
97 static struct class *nvme_class;
98 static struct class *nvme_subsys_class;
99
100 static void nvme_ns_remove(struct nvme_ns *ns);
101 static int nvme_revalidate_disk(struct gendisk *disk);
102 static void nvme_put_subsystem(struct nvme_subsystem *subsys);
103 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
104                                            unsigned nsid);
105
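/*
 * Mark a namespace's request queue as dying so new and queued I/O fails
 * instead of blocking.  The queue is forcibly unquiesced first so anything
 * stuck in dispatch can complete with an error, then the disk is
 * revalidated to drop its capacity to 0.
 */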
106 static void nvme_set_queue_dying(struct nvme_ns *ns)
107 {
108         /*
109          * Revalidating a dead namespace sets capacity to 0. This stops
110          * buffered writers from dirtying pages that can't be synced.
111          */
112         if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
113                 return;
114         blk_set_queue_dying(ns->queue);
115         /* Forcibly unquiesce queues to avoid blocking dispatch */
116         blk_mq_unquiesce_queue(ns->queue);
117         /*
118          * Revalidate after unblocking dispatchers that may be holding bd_mutex
119          */
120         revalidate_disk(ns->disk);
121 }
122
123 static void nvme_queue_scan(struct nvme_ctrl *ctrl)
124 {
125         /*
126          * Only queue new scan work when admin and IO queues are both alive
127          */
128         if (ctrl->state == NVME_CTRL_LIVE)
129                 queue_work(nvme_wq, &ctrl->scan_work);
130 }
131
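/*
 * Schedule an asynchronous controller reset: move the controller to the
 * RESETTING state and queue its reset_work on nvme_reset_wq.  Returns
 * -EBUSY if the state transition is not allowed or the work is already
 * queued.
 */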
132 int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
133 {
134         if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
135                 return -EBUSY;
136         if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
137                 return -EBUSY;
138         return 0;
139 }
140 EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
141
142 int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
143 {
144         int ret;
145
146         ret = nvme_reset_ctrl(ctrl);
147         if (!ret) {
148                 flush_work(&ctrl->reset_work);
149                 if (ctrl->state != NVME_CTRL_LIVE &&
150                     ctrl->state != NVME_CTRL_ADMIN_ONLY)
151                         ret = -ENETRESET;
152         }
153
154         return ret;
155 }
156 EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
157
158 static void nvme_delete_ctrl_work(struct work_struct *work)
159 {
160         struct nvme_ctrl *ctrl =
161                 container_of(work, struct nvme_ctrl, delete_work);
162
163         dev_info(ctrl->device,
164                  "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn);
165
166         flush_work(&ctrl->reset_work);
167         nvme_stop_ctrl(ctrl);
168         nvme_remove_namespaces(ctrl);
169         ctrl->ops->delete_ctrl(ctrl);
170         nvme_uninit_ctrl(ctrl);
171         nvme_put_ctrl(ctrl);
172 }
173
174 int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
175 {
176         if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
177                 return -EBUSY;
178         if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
179                 return -EBUSY;
180         return 0;
181 }
182 EXPORT_SYMBOL_GPL(nvme_delete_ctrl);
183
184 int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
185 {
186         int ret = 0;
187
188         /*
189          * Keep a reference until the work is flushed since ->delete_ctrl
190          * can free the controller.
191          */
192         nvme_get_ctrl(ctrl);
193         ret = nvme_delete_ctrl(ctrl);
194         if (!ret)
195                 flush_work(&ctrl->delete_work);
196         nvme_put_ctrl(ctrl);
197         return ret;
198 }
199 EXPORT_SYMBOL_GPL(nvme_delete_ctrl_sync);
200
201 static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
202 {
203         return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
204 }
205
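/*
 * Translate the NVMe status of a completed request (status code and status
 * code type, ignoring the DNR and More bits) into a block layer
 * blk_status_t.
 */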
206 static blk_status_t nvme_error_status(struct request *req)
207 {
208         switch (nvme_req(req)->status & 0x7ff) {
209         case NVME_SC_SUCCESS:
210                 return BLK_STS_OK;
211         case NVME_SC_CAP_EXCEEDED:
212                 return BLK_STS_NOSPC;
213         case NVME_SC_LBA_RANGE:
214                 return BLK_STS_TARGET;
215         case NVME_SC_BAD_ATTRIBUTES:
216         case NVME_SC_ONCS_NOT_SUPPORTED:
217         case NVME_SC_INVALID_OPCODE:
218         case NVME_SC_INVALID_FIELD:
219         case NVME_SC_INVALID_NS:
220                 return BLK_STS_NOTSUPP;
221         case NVME_SC_WRITE_FAULT:
222         case NVME_SC_READ_ERROR:
223         case NVME_SC_UNWRITTEN_BLOCK:
224         case NVME_SC_ACCESS_DENIED:
225         case NVME_SC_READ_ONLY:
226         case NVME_SC_COMPARE_FAILED:
227                 return BLK_STS_MEDIUM;
228         case NVME_SC_GUARD_CHECK:
229         case NVME_SC_APPTAG_CHECK:
230         case NVME_SC_REFTAG_CHECK:
231         case NVME_SC_INVALID_PI:
232                 return BLK_STS_PROTECTION;
233         case NVME_SC_RESERVATION_CONFLICT:
234                 return BLK_STS_NEXUS;
235         default:
236                 return BLK_STS_IOERR;
237         }
238 }
239
240 static inline bool nvme_req_needs_retry(struct request *req)
241 {
242         if (blk_noretry_request(req))
243                 return false;
244         if (nvme_req(req)->status & NVME_SC_DNR)
245                 return false;
246         if (nvme_req(req)->retries >= nvme_max_retries)
247                 return false;
248         return true;
249 }
250
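/*
 * Common completion path for NVMe requests: retry or fail over the request
 * when possible, otherwise end it with the translated block layer status.
 */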
251 void nvme_complete_rq(struct request *req)
252 {
253         blk_status_t status = nvme_error_status(req);
254
255         trace_nvme_complete_rq(req);
256
257         if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
258                 if ((req->cmd_flags & REQ_NVME_MPATH) && nvme_failover_req(req))
259                         return;
260
261                 if (!blk_queue_dying(req->q)) {
262                         nvme_req(req)->retries++;
263                         blk_mq_requeue_request(req, true);
264                         return;
265                 }
266         }
267         blk_mq_end_request(req, status);
268 }
269 EXPORT_SYMBOL_GPL(nvme_complete_rq);
270
271 void nvme_cancel_request(struct request *req, void *data, bool reserved)
272 {
273         dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
274                                 "Cancelling I/O %d", req->tag);
275
276         nvme_req(req)->status = NVME_SC_ABORT_REQ;
277         blk_mq_complete_request(req);
278
279 }
280 EXPORT_SYMBOL_GPL(nvme_cancel_request);
281
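/*
 * Controller state machine.  Only the transitions listed below are legal;
 * the new state is applied under ctrl->lock and true is returned if the
 * transition was performed.  Requeue lists are kicked when a controller
 * becomes LIVE again so queued multipath I/O can make progress.
 */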
282 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
283                 enum nvme_ctrl_state new_state)
284 {
285         enum nvme_ctrl_state old_state;
286         unsigned long flags;
287         bool changed = false;
288
289         spin_lock_irqsave(&ctrl->lock, flags);
290
291         old_state = ctrl->state;
292         switch (new_state) {
293         case NVME_CTRL_ADMIN_ONLY:
294                 switch (old_state) {
295                 case NVME_CTRL_CONNECTING:
296                         changed = true;
297                         /* FALLTHRU */
298                 default:
299                         break;
300                 }
301                 break;
302         case NVME_CTRL_LIVE:
303                 switch (old_state) {
304                 case NVME_CTRL_NEW:
305                 case NVME_CTRL_RESETTING:
306                 case NVME_CTRL_CONNECTING:
307                         changed = true;
308                         /* FALLTHRU */
309                 default:
310                         break;
311                 }
312                 break;
313         case NVME_CTRL_RESETTING:
314                 switch (old_state) {
315                 case NVME_CTRL_NEW:
316                 case NVME_CTRL_LIVE:
317                 case NVME_CTRL_ADMIN_ONLY:
318                         changed = true;
319                         /* FALLTHRU */
320                 default:
321                         break;
322                 }
323                 break;
324         case NVME_CTRL_CONNECTING:
325                 switch (old_state) {
326                 case NVME_CTRL_NEW:
327                 case NVME_CTRL_RESETTING:
328                         changed = true;
329                         /* FALLTHRU */
330                 default:
331                         break;
332                 }
333                 break;
334         case NVME_CTRL_DELETING:
335                 switch (old_state) {
336                 case NVME_CTRL_LIVE:
337                 case NVME_CTRL_ADMIN_ONLY:
338                 case NVME_CTRL_RESETTING:
339                 case NVME_CTRL_CONNECTING:
340                         changed = true;
341                         /* FALLTHRU */
342                 default:
343                         break;
344                 }
345                 break;
346         case NVME_CTRL_DEAD:
347                 switch (old_state) {
348                 case NVME_CTRL_DELETING:
349                         changed = true;
350                         /* FALLTHRU */
351                 default:
352                         break;
353                 }
354                 break;
355         default:
356                 break;
357         }
358
359         if (changed)
360                 ctrl->state = new_state;
361
362         spin_unlock_irqrestore(&ctrl->lock, flags);
363         if (changed && ctrl->state == NVME_CTRL_LIVE)
364                 nvme_kick_requeue_lists(ctrl);
365         return changed;
366 }
367 EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
368
369 static void nvme_free_ns_head(struct kref *ref)
370 {
371         struct nvme_ns_head *head =
372                 container_of(ref, struct nvme_ns_head, ref);
373
374         nvme_mpath_remove_disk(head);
375         ida_simple_remove(&head->subsys->ns_ida, head->instance);
376         list_del_init(&head->entry);
377         cleanup_srcu_struct_quiesced(&head->srcu);
378         nvme_put_subsystem(head->subsys);
379         kfree(head);
380 }
381
382 static void nvme_put_ns_head(struct nvme_ns_head *head)
383 {
384         kref_put(&head->ref, nvme_free_ns_head);
385 }
386
387 static void nvme_free_ns(struct kref *kref)
388 {
389         struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
390
391         if (ns->ndev)
392                 nvme_nvm_unregister(ns);
393
394         put_disk(ns->disk);
395         nvme_put_ns_head(ns->head);
396         nvme_put_ctrl(ns->ctrl);
397         kfree(ns);
398 }
399
400 static void nvme_put_ns(struct nvme_ns *ns)
401 {
402         kref_put(&ns->kref, nvme_free_ns);
403 }
404
405 static inline void nvme_clear_nvme_request(struct request *req)
406 {
407         if (!(req->rq_flags & RQF_DONTPREP)) {
408                 nvme_req(req)->retries = 0;
409                 nvme_req(req)->flags = 0;
410                 req->rq_flags |= RQF_DONTPREP;
411         }
412 }
413
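/*
 * Allocate a passthrough request carrying the given NVMe command.  With
 * NVME_QID_ANY the block layer may use any hardware context; otherwise the
 * request is bound to the hardware context derived from qid.
 */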
414 struct request *nvme_alloc_request(struct request_queue *q,
415                 struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
416 {
417         unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
418         struct request *req;
419
420         if (qid == NVME_QID_ANY) {
421                 req = blk_mq_alloc_request(q, op, flags);
422         } else {
423                 req = blk_mq_alloc_request_hctx(q, op, flags,
424                                 qid ? qid - 1 : 0);
425         }
426         if (IS_ERR(req))
427                 return req;
428
429         req->cmd_flags |= REQ_FAILFAST_DRIVER;
430         nvme_clear_nvme_request(req);
431         nvme_req(req)->cmd = cmd;
432
433         return req;
434 }
435 EXPORT_SYMBOL_GPL(nvme_alloc_request);
436
437 static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
438 {
439         struct nvme_command c;
440
441         memset(&c, 0, sizeof(c));
442
443         c.directive.opcode = nvme_admin_directive_send;
444         c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
445         c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE;
446         c.directive.dtype = NVME_DIR_IDENTIFY;
447         c.directive.tdtype = NVME_DIR_STREAMS;
448         c.directive.endir = enable ? NVME_DIR_ENDIR : 0;
449
450         return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
451 }
452
453 static int nvme_disable_streams(struct nvme_ctrl *ctrl)
454 {
455         return nvme_toggle_streams(ctrl, false);
456 }
457
458 static int nvme_enable_streams(struct nvme_ctrl *ctrl)
459 {
460         return nvme_toggle_streams(ctrl, true);
461 }
462
463 static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
464                                   struct streams_directive_params *s, u32 nsid)
465 {
466         struct nvme_command c;
467
468         memset(&c, 0, sizeof(c));
469         memset(s, 0, sizeof(*s));
470
471         c.directive.opcode = nvme_admin_directive_recv;
472         c.directive.nsid = cpu_to_le32(nsid);
473         c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1);
474         c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
475         c.directive.dtype = NVME_DIR_STREAMS;
476
477         return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s));
478 }
479
480 static int nvme_configure_directives(struct nvme_ctrl *ctrl)
481 {
482         struct streams_directive_params s;
483         int ret;
484
485         if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
486                 return 0;
487         if (!streams)
488                 return 0;
489
490         ret = nvme_enable_streams(ctrl);
491         if (ret)
492                 return ret;
493
494         ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL);
495         if (ret)
496                 return ret;
497
498         ctrl->nssa = le16_to_cpu(s.nssa);
499         if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
500                 dev_info(ctrl->device, "too few streams (%u) available\n",
501                                         ctrl->nssa);
502                 nvme_disable_streams(ctrl);
503                 return 0;
504         }
505
506         ctrl->nr_streams = min_t(unsigned, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
507         dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
508         return 0;
509 }
510
511 /*
512  * Check if 'req' has a write hint associated with it. If it does, assign
513  * a valid namespace stream to the write.
514  */
515 static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
516                                      struct request *req, u16 *control,
517                                      u32 *dsmgmt)
518 {
519         enum rw_hint streamid = req->write_hint;
520
521         if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
522                 streamid = 0;
523         else {
524                 streamid--;
525                 if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
526                         return;
527
528                 *control |= NVME_RW_DTYPE_STREAMS;
529                 *dsmgmt |= streamid << 16;
530         }
531
532         if (streamid < ARRAY_SIZE(req->q->write_hints))
533                 req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
534 }
535
536 static inline void nvme_setup_flush(struct nvme_ns *ns,
537                 struct nvme_command *cmnd)
538 {
539         memset(cmnd, 0, sizeof(*cmnd));
540         cmnd->common.opcode = nvme_cmd_flush;
541         cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
542 }
543
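/*
 * Build a DSM (deallocate) command from the discard bios attached to the
 * request.  The range buffer is carried as a special payload and released
 * in nvme_cleanup_cmd().
 */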
544 static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
545                 struct nvme_command *cmnd)
546 {
547         unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
548         struct nvme_dsm_range *range;
549         struct bio *bio;
550
551         /*
552          * Some devices do not consider the DSM 'Number of Ranges' field when
553          * determining how much data to DMA. Always allocate memory for maximum
554          * number of segments to prevent the device from reading beyond the end of the buffer.
555          */
556         static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;
557
558         range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
559         if (!range) {
560                 /*
561                  * If we fail to allocate our range, fall back to the controller
562                  * discard page. If that's also busy, it's safe to return
563                  * busy, as we know we can make progress once that's freed.
564                  */
565                 if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
566                         return BLK_STS_RESOURCE;
567
568                 range = page_address(ns->ctrl->discard_page);
569         }
570
571         __rq_for_each_bio(bio, req) {
572                 u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
573                 u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
574
575                 if (n < segments) {
576                         range[n].cattr = cpu_to_le32(0);
577                         range[n].nlb = cpu_to_le32(nlb);
578                         range[n].slba = cpu_to_le64(slba);
579                 }
580                 n++;
581         }
582
583         if (WARN_ON_ONCE(n != segments)) {
584                 if (virt_to_page(range) == ns->ctrl->discard_page)
585                         clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
586                 else
587                         kfree(range);
588                 return BLK_STS_IOERR;
589         }
590
591         memset(cmnd, 0, sizeof(*cmnd));
592         cmnd->dsm.opcode = nvme_cmd_dsm;
593         cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
594         cmnd->dsm.nr = cpu_to_le32(segments - 1);
595         cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
596
597         req->special_vec.bv_page = virt_to_page(range);
598         req->special_vec.bv_offset = offset_in_page(range);
599         req->special_vec.bv_len = alloc_size;
600         req->rq_flags |= RQF_SPECIAL_PAYLOAD;
601
602         return BLK_STS_OK;
603 }
604
605 static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
606                 struct request *req, struct nvme_command *cmnd)
607 {
608         struct nvme_ctrl *ctrl = ns->ctrl;
609         u16 control = 0;
610         u32 dsmgmt = 0;
611
612         if (req->cmd_flags & REQ_FUA)
613                 control |= NVME_RW_FUA;
614         if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
615                 control |= NVME_RW_LR;
616
617         if (req->cmd_flags & REQ_RAHEAD)
618                 dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
619
620         memset(cmnd, 0, sizeof(*cmnd));
621         cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
622         cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
623         cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
624         cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
625
626         if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
627                 nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);
628
629         if (ns->ms) {
630                 /*
631                  * If formatted with metadata, the block layer always provides a
632                  * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.  Else
633                  * we enable the PRACT bit for protection information or set the
634                  * namespace capacity to zero to prevent any I/O.
635                  */
636                 if (!blk_integrity_rq(req)) {
637                         if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
638                                 return BLK_STS_NOTSUPP;
639                         control |= NVME_RW_PRINFO_PRACT;
640                 } else if (req_op(req) == REQ_OP_WRITE) {
641                         t10_pi_prepare(req, ns->pi_type);
642                 }
643
644                 switch (ns->pi_type) {
645                 case NVME_NS_DPS_PI_TYPE3:
646                         control |= NVME_RW_PRINFO_PRCHK_GUARD;
647                         break;
648                 case NVME_NS_DPS_PI_TYPE1:
649                 case NVME_NS_DPS_PI_TYPE2:
650                         control |= NVME_RW_PRINFO_PRCHK_GUARD |
651                                         NVME_RW_PRINFO_PRCHK_REF;
652                         cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
653                         break;
654                 }
655         }
656
657         cmnd->rw.control = cpu_to_le16(control);
658         cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
659         return 0;
660 }
661
662 void nvme_cleanup_cmd(struct request *req)
663 {
664         if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
665             nvme_req(req)->status == 0) {
666                 struct nvme_ns *ns = req->rq_disk->private_data;
667
668                 t10_pi_complete(req, ns->pi_type,
669                                 blk_rq_bytes(req) >> ns->lba_shift);
670         }
671         if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
672                 struct nvme_ns *ns = req->rq_disk->private_data;
673                 struct page *page = req->special_vec.bv_page;
674
675                 if (page == ns->ctrl->discard_page)
676                         clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
677                 else
678                         kfree(page_address(page) + req->special_vec.bv_offset);
679         }
680 }
681 EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
682
683 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
684                 struct nvme_command *cmd)
685 {
686         blk_status_t ret = BLK_STS_OK;
687
688         nvme_clear_nvme_request(req);
689
690         switch (req_op(req)) {
691         case REQ_OP_DRV_IN:
692         case REQ_OP_DRV_OUT:
693                 memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
694                 break;
695         case REQ_OP_FLUSH:
696                 nvme_setup_flush(ns, cmd);
697                 break;
698         case REQ_OP_WRITE_ZEROES:
699                 /* currently only aliased to deallocate for a few ctrls: */
700         case REQ_OP_DISCARD:
701                 ret = nvme_setup_discard(ns, req, cmd);
702                 break;
703         case REQ_OP_READ:
704         case REQ_OP_WRITE:
705                 ret = nvme_setup_rw(ns, req, cmd);
706                 break;
707         default:
708                 WARN_ON_ONCE(1);
709                 return BLK_STS_IOERR;
710         }
711
712         cmd->common.command_id = req->tag;
713         trace_nvme_setup_cmd(req, cmd);
714         return ret;
715 }
716 EXPORT_SYMBOL_GPL(nvme_setup_cmd);
717
718 /*
719  * Returns 0 on success.  If the result is negative, it's a Linux error code;
720  * if the result is positive, it's an NVM Express status code.
721  */
722 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
723                 union nvme_result *result, void *buffer, unsigned bufflen,
724                 unsigned timeout, int qid, int at_head,
725                 blk_mq_req_flags_t flags)
726 {
727         struct request *req;
728         int ret;
729
730         req = nvme_alloc_request(q, cmd, flags, qid);
731         if (IS_ERR(req))
732                 return PTR_ERR(req);
733
734         req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
735
736         if (buffer && bufflen) {
737                 ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
738                 if (ret)
739                         goto out;
740         }
741
742         blk_execute_rq(req->q, NULL, req, at_head);
743         if (result)
744                 *result = nvme_req(req)->result;
745         if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
746                 ret = -EINTR;
747         else
748                 ret = nvme_req(req)->status;
749  out:
750         blk_mq_free_request(req);
751         return ret;
752 }
753 EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
754
755 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
756                 void *buffer, unsigned bufflen)
757 {
758         return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
759                         NVME_QID_ANY, 0, 0);
760 }
761 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
762
763 static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
764                 unsigned len, u32 seed, bool write)
765 {
766         struct bio_integrity_payload *bip;
767         int ret = -ENOMEM;
768         void *buf;
769
770         buf = kmalloc(len, GFP_KERNEL);
771         if (!buf)
772                 goto out;
773
774         ret = -EFAULT;
775         if (write && copy_from_user(buf, ubuf, len))
776                 goto out_free_meta;
777
778         bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
779         if (IS_ERR(bip)) {
780                 ret = PTR_ERR(bip);
781                 goto out_free_meta;
782         }
783
784         bip->bip_iter.bi_size = len;
785         bip->bip_iter.bi_sector = seed;
786         ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
787                         offset_in_page(buf));
788         if (ret == len)
789                 return buf;
790         ret = -ENOMEM;
791 out_free_meta:
792         kfree(buf);
793 out:
794         return ERR_PTR(ret);
795 }
796
797 static int nvme_submit_user_cmd(struct request_queue *q,
798                 struct nvme_command *cmd, void __user *ubuffer,
799                 unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
800                 u32 meta_seed, u32 *result, unsigned timeout)
801 {
802         bool write = nvme_is_write(cmd);
803         struct nvme_ns *ns = q->queuedata;
804         struct gendisk *disk = ns ? ns->disk : NULL;
805         struct request *req;
806         struct bio *bio = NULL;
807         void *meta = NULL;
808         int ret;
809
810         req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
811         if (IS_ERR(req))
812                 return PTR_ERR(req);
813
814         req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
815         nvme_req(req)->flags |= NVME_REQ_USERCMD;
816
817         if (ubuffer && bufflen) {
818                 ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
819                                 GFP_KERNEL);
820                 if (ret)
821                         goto out;
822                 bio = req->bio;
823                 bio->bi_disk = disk;
824                 if (disk && meta_buffer && meta_len) {
825                         meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
826                                         meta_seed, write);
827                         if (IS_ERR(meta)) {
828                                 ret = PTR_ERR(meta);
829                                 goto out_unmap;
830                         }
831                         req->cmd_flags |= REQ_INTEGRITY;
832                 }
833         }
834
835         blk_execute_rq(req->q, disk, req, 0);
836         if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
837                 ret = -EINTR;
838         else
839                 ret = nvme_req(req)->status;
840         if (result)
841                 *result = le32_to_cpu(nvme_req(req)->result.u32);
842         if (meta && !ret && !write) {
843                 if (copy_to_user(meta_buffer, meta, meta_len))
844                         ret = -EFAULT;
845         }
846         kfree(meta);
847  out_unmap:
848         if (bio)
849                 blk_rq_unmap_user(bio);
850  out:
851         blk_mq_free_request(req);
852         return ret;
853 }
854
855 static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
856 {
857         struct nvme_ctrl *ctrl = rq->end_io_data;
858         unsigned long flags;
859         bool startka = false;
860
861         blk_mq_free_request(rq);
862
863         if (status) {
864                 dev_err(ctrl->device,
865                         "failed nvme_keep_alive_end_io error=%d\n",
866                                 status);
867                 return;
868         }
869
870         spin_lock_irqsave(&ctrl->lock, flags);
871         if (ctrl->state == NVME_CTRL_LIVE ||
872             ctrl->state == NVME_CTRL_CONNECTING)
873                 startka = true;
874         spin_unlock_irqrestore(&ctrl->lock, flags);
875         if (startka)
876                 schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
877 }
878
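/*
 * Send a single keep-alive command asynchronously on a reserved request;
 * nvme_keep_alive_end_io() re-arms the delayed work when it completes
 * while the controller is live or connecting.
 */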
879 static int nvme_keep_alive(struct nvme_ctrl *ctrl)
880 {
881         struct request *rq;
882
883         rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
884                         NVME_QID_ANY);
885         if (IS_ERR(rq))
886                 return PTR_ERR(rq);
887
888         rq->timeout = ctrl->kato * HZ;
889         rq->end_io_data = ctrl;
890
891         blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);
892
893         return 0;
894 }
895
896 static void nvme_keep_alive_work(struct work_struct *work)
897 {
898         struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
899                         struct nvme_ctrl, ka_work);
900
901         if (nvme_keep_alive(ctrl)) {
902                 /* allocation failure, reset the controller */
903                 dev_err(ctrl->device, "keep-alive failed\n");
904                 nvme_reset_ctrl(ctrl);
905                 return;
906         }
907 }
908
909 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
910 {
911         if (unlikely(ctrl->kato == 0))
912                 return;
913
914         schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
915 }
916
917 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
918 {
919         if (unlikely(ctrl->kato == 0))
920                 return;
921
922         cancel_delayed_work_sync(&ctrl->ka_work);
923 }
924 EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
925
926 /*
927  * In NVMe 1.0 the CNS field was just a binary controller or namespace
928  * flag, thus sending any new CNS opcodes has a big chance of not working.
929  * Qemu unfortunately had that bug while reporting a 1.1 version compliance
930  * (but not for any later version).
931  */
932 static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
933 {
934         if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
935                 return ctrl->vs < NVME_VS(1, 2, 0);
936         return ctrl->vs < NVME_VS(1, 1, 0);
937 }
938
939 static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
940 {
941         struct nvme_command c = { };
942         int error;
943
944         /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
945         c.identify.opcode = nvme_admin_identify;
946         c.identify.cns = NVME_ID_CNS_CTRL;
947
948         *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
949         if (!*id)
950                 return -ENOMEM;
951
952         error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
953                         sizeof(struct nvme_id_ctrl));
954         if (error)
955                 kfree(*id);
956         return error;
957 }
958
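/*
 * Issue Identify with CNS 03h (Namespace Identification Descriptor list)
 * and copy any EUI-64, NGUID and UUID descriptors found into *ids.
 */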
959 static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
960                 struct nvme_ns_ids *ids)
961 {
962         struct nvme_command c = { };
963         int status;
964         void *data;
965         int pos;
966         int len;
967
968         c.identify.opcode = nvme_admin_identify;
969         c.identify.nsid = cpu_to_le32(nsid);
970         c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
971
972         data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
973         if (!data)
974                 return -ENOMEM;
975
976         status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
977                                       NVME_IDENTIFY_DATA_SIZE);
978         if (status)
979                 goto free_data;
980
981         for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
982                 struct nvme_ns_id_desc *cur = data + pos;
983
984                 if (cur->nidl == 0)
985                         break;
986
987                 switch (cur->nidt) {
988                 case NVME_NIDT_EUI64:
989                         if (cur->nidl != NVME_NIDT_EUI64_LEN) {
990                                 dev_warn(ctrl->device,
991                                          "ctrl returned bogus length: %d for NVME_NIDT_EUI64\n",
992                                          cur->nidl);
993                                 goto free_data;
994                         }
995                         len = NVME_NIDT_EUI64_LEN;
996                         memcpy(ids->eui64, data + pos + sizeof(*cur), len);
997                         break;
998                 case NVME_NIDT_NGUID:
999                         if (cur->nidl != NVME_NIDT_NGUID_LEN) {
1000                                 dev_warn(ctrl->device,
1001                                          "ctrl returned bogus length: %d for NVME_NIDT_NGUID\n",
1002                                          cur->nidl);
1003                                 goto free_data;
1004                         }
1005                         len = NVME_NIDT_NGUID_LEN;
1006                         memcpy(ids->nguid, data + pos + sizeof(*cur), len);
1007                         break;
1008                 case NVME_NIDT_UUID:
1009                         if (cur->nidl != NVME_NIDT_UUID_LEN) {
1010                                 dev_warn(ctrl->device,
1011                                          "ctrl returned bogus length: %d for NVME_NIDT_UUID\n",
1012                                          cur->nidl);
1013                                 goto free_data;
1014                         }
1015                         len = NVME_NIDT_UUID_LEN;
1016                         uuid_copy(&ids->uuid, data + pos + sizeof(*cur));
1017                         break;
1018                 default:
1019                         /* Skip unknown types */
1020                         len = cur->nidl;
1021                         break;
1022                 }
1023
1024                 len += sizeof(*cur);
1025         }
1026 free_data:
1027         kfree(data);
1028         return status;
1029 }
1030
1031 static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
1032 {
1033         struct nvme_command c = { };
1034
1035         c.identify.opcode = nvme_admin_identify;
1036         c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
1037         c.identify.nsid = cpu_to_le32(nsid);
1038         return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list,
1039                                     NVME_IDENTIFY_DATA_SIZE);
1040 }
1041
1042 static struct nvme_id_ns *nvme_identify_ns(struct nvme_ctrl *ctrl,
1043                 unsigned nsid)
1044 {
1045         struct nvme_id_ns *id;
1046         struct nvme_command c = { };
1047         int error;
1048
1049         /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1050         c.identify.opcode = nvme_admin_identify;
1051         c.identify.nsid = cpu_to_le32(nsid);
1052         c.identify.cns = NVME_ID_CNS_NS;
1053
1054         id = kmalloc(sizeof(*id), GFP_KERNEL);
1055         if (!id)
1056                 return NULL;
1057
1058         error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
1059         if (error) {
1060                 dev_warn(ctrl->device, "Identify namespace failed\n");
1061                 kfree(id);
1062                 return NULL;
1063         }
1064
1065         return id;
1066 }
1067
1068 static int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
1069                       void *buffer, size_t buflen, u32 *result)
1070 {
1071         union nvme_result res = { 0 };
1072         struct nvme_command c;
1073         int ret;
1074
1075         memset(&c, 0, sizeof(c));
1076         c.features.opcode = nvme_admin_set_features;
1077         c.features.fid = cpu_to_le32(fid);
1078         c.features.dword11 = cpu_to_le32(dword11);
1079
1080         ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
1081                         buffer, buflen, 0, NVME_QID_ANY, 0, 0);
1082         if (ret >= 0 && result)
1083                 *result = le32_to_cpu(res.u32);
1084         return ret;
1085 }
1086
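/*
 * Set the Number of Queues feature.  The requested count is encoded as a
 * 0's based value in both halves of dword11; on success *count is clamped
 * to what the controller actually granted.  Errors from a degraded
 * controller are not fatal: *count is set to 0 so only the admin queue is
 * used.
 */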
1087 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
1088 {
1089         u32 q_count = (*count - 1) | ((*count - 1) << 16);
1090         u32 result;
1091         int status, nr_io_queues;
1092
1093         status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
1094                         &result);
1095         if (status < 0)
1096                 return status;
1097
1098         /*
1099          * Degraded controllers might return an error when setting the queue
1100          * count.  We still want to be able to bring them online and offer
1101          * access to the admin queue, as that might be the only way to fix them up.
1102          */
1103         if (status > 0) {
1104                 dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
1105                 *count = 0;
1106         } else {
1107                 nr_io_queues = min(result & 0xffff, result >> 16) + 1;
1108                 *count = min(*count, nr_io_queues);
1109         }
1110
1111         return 0;
1112 }
1113 EXPORT_SYMBOL_GPL(nvme_set_queue_count);
1114
1115 #define NVME_AEN_SUPPORTED \
1116         (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | NVME_AEN_CFG_ANA_CHANGE)
1117
1118 static void nvme_enable_aen(struct nvme_ctrl *ctrl)
1119 {
1120         u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
1121         int status;
1122
1123         if (!supported_aens)
1124                 return;
1125
1126         status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
1127                         NULL, 0, &result);
1128         if (status)
1129                 dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
1130                          supported_aens);
1131 }
1132
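/*
 * NVME_IOCTL_SUBMIT_IO: build a read/write/compare command from the
 * user-supplied struct nvme_user_io and execute it synchronously, mapping
 * the user data buffer and, for separate metadata formats, the metadata
 * buffer.
 */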
1133 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1134 {
1135         struct nvme_user_io io;
1136         struct nvme_command c;
1137         unsigned length, meta_len;
1138         void __user *metadata;
1139
1140         if (copy_from_user(&io, uio, sizeof(io)))
1141                 return -EFAULT;
1142         if (io.flags)
1143                 return -EINVAL;
1144
1145         switch (io.opcode) {
1146         case nvme_cmd_write:
1147         case nvme_cmd_read:
1148         case nvme_cmd_compare:
1149                 break;
1150         default:
1151                 return -EINVAL;
1152         }
1153
1154         length = (io.nblocks + 1) << ns->lba_shift;
1155         meta_len = (io.nblocks + 1) * ns->ms;
1156         metadata = (void __user *)(uintptr_t)io.metadata;
1157
1158         if (ns->ext) {
1159                 length += meta_len;
1160                 meta_len = 0;
1161         } else if (meta_len) {
1162                 if ((io.metadata & 3) || !io.metadata)
1163                         return -EINVAL;
1164         }
1165
1166         memset(&c, 0, sizeof(c));
1167         c.rw.opcode = io.opcode;
1168         c.rw.flags = io.flags;
1169         c.rw.nsid = cpu_to_le32(ns->head->ns_id);
1170         c.rw.slba = cpu_to_le64(io.slba);
1171         c.rw.length = cpu_to_le16(io.nblocks);
1172         c.rw.control = cpu_to_le16(io.control);
1173         c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
1174         c.rw.reftag = cpu_to_le32(io.reftag);
1175         c.rw.apptag = cpu_to_le16(io.apptag);
1176         c.rw.appmask = cpu_to_le16(io.appmask);
1177
1178         return nvme_submit_user_cmd(ns->queue, &c,
1179                         (void __user *)(uintptr_t)io.addr, length,
1180                         metadata, meta_len, io.slba, NULL, 0);
1181 }
1182
1183 static u32 nvme_known_admin_effects(u8 opcode)
1184 {
1185         switch (opcode) {
1186         case nvme_admin_format_nvm:
1187                 return NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
1188                                         NVME_CMD_EFFECTS_CSE_MASK;
1189         case nvme_admin_sanitize_nvm:
1190                 return NVME_CMD_EFFECTS_CSE_MASK;
1191         default:
1192                 break;
1193         }
1194         return 0;
1195 }
1196
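/*
 * Prepare for a passthrough command: look up the command's reported
 * effects and, when they include LBCC or CSE, freeze I/O on all namespaces
 * until nvme_passthru_end() unfreezes them.  The returned effects mask is
 * passed to nvme_passthru_end().
 */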
1197 static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1198                                                                 u8 opcode)
1199 {
1200         u32 effects = 0;
1201
1202         if (ns) {
1203                 if (ctrl->effects)
1204                         effects = le32_to_cpu(ctrl->effects->iocs[opcode]);
1205                 if (effects & ~NVME_CMD_EFFECTS_CSUPP)
1206                         dev_warn(ctrl->device,
1207                                  "IO command:%02x has unhandled effects:%08x\n",
1208                                  opcode, effects);
1209                 return 0;
1210         }
1211
1212         if (ctrl->effects)
1213                 effects = le32_to_cpu(ctrl->effects->acs[opcode]);
1214         else
1215                 effects = nvme_known_admin_effects(opcode);
1216
1217         /*
1218          * For simplicity, IO to all namespaces is quiesced even if the command
1219          * effects say only one namespace is affected.
1220          */
1221         if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
1222                 mutex_lock(&ctrl->scan_lock);
1223                 mutex_lock(&ctrl->subsys->lock);
1224                 nvme_mpath_start_freeze(ctrl->subsys);
1225                 nvme_mpath_wait_freeze(ctrl->subsys);
1226                 nvme_start_freeze(ctrl);
1227                 nvme_wait_freeze(ctrl);
1228         }
1229         return effects;
1230 }
1231
1232 static void nvme_update_formats(struct nvme_ctrl *ctrl)
1233 {
1234         struct nvme_ns *ns;
1235
1236         down_read(&ctrl->namespaces_rwsem);
1237         list_for_each_entry(ns, &ctrl->namespaces, list)
1238                 if (ns->disk && nvme_revalidate_disk(ns->disk))
1239                         nvme_set_queue_dying(ns);
1240         up_read(&ctrl->namespaces_rwsem);
1241
1242         nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
1243 }
1244
1245 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
1246 {
1247         /*
1248          * Revalidate LBA changes prior to unfreezing. This is necessary to
1249          * prevent memory corruption if a logical block size was changed by
1250          * this command.
1251          */
1252         if (effects & NVME_CMD_EFFECTS_LBCC)
1253                 nvme_update_formats(ctrl);
1254         if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
1255                 nvme_unfreeze(ctrl);
1256                 nvme_mpath_unfreeze(ctrl->subsys);
1257                 mutex_unlock(&ctrl->subsys->lock);
1258                 mutex_unlock(&ctrl->scan_lock);
1259         }
1260         if (effects & NVME_CMD_EFFECTS_CCC)
1261                 nvme_init_identify(ctrl);
1262         if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC))
1263                 nvme_queue_scan(ctrl);
1264 }
1265
1266 static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1267                         struct nvme_passthru_cmd __user *ucmd)
1268 {
1269         struct nvme_passthru_cmd cmd;
1270         struct nvme_command c;
1271         unsigned timeout = 0;
1272         u32 effects;
1273         int status;
1274
1275         if (!capable(CAP_SYS_ADMIN))
1276                 return -EACCES;
1277         if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
1278                 return -EFAULT;
1279         if (cmd.flags)
1280                 return -EINVAL;
1281
1282         memset(&c, 0, sizeof(c));
1283         c.common.opcode = cmd.opcode;
1284         c.common.flags = cmd.flags;
1285         c.common.nsid = cpu_to_le32(cmd.nsid);
1286         c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
1287         c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
1288         c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
1289         c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
1290         c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
1291         c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
1292         c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
1293         c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);
1294
1295         if (cmd.timeout_ms)
1296                 timeout = msecs_to_jiffies(cmd.timeout_ms);
1297
1298         effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
1299         status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
1300                         (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
1301                         (void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len,
1302                         0, &cmd.result, timeout);
1303         nvme_passthru_end(ctrl, effects);
1304
1305         if (status >= 0) {
1306                 if (put_user(cmd.result, &ucmd->result))
1307                         return -EFAULT;
1308         }
1309
1310         return status;
1311 }
1312
1313 /*
1314  * Issue ioctl requests on the first available path.  Note that unlike normal
1315  * block layer requests we will not retry failed requests on another controller.
1316  */
1317 static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
1318                 struct nvme_ns_head **head, int *srcu_idx)
1319 {
1320 #ifdef CONFIG_NVME_MULTIPATH
1321         if (disk->fops == &nvme_ns_head_ops) {
1322                 struct nvme_ns *ns;
1323
1324                 *head = disk->private_data;
1325                 *srcu_idx = srcu_read_lock(&(*head)->srcu);
1326                 ns = nvme_find_path(*head);
1327                 if (!ns)
1328                         srcu_read_unlock(&(*head)->srcu, *srcu_idx);
1329                 return ns;
1330         }
1331 #endif
1332         *head = NULL;
1333         *srcu_idx = -1;
1334         return disk->private_data;
1335 }
1336
1337 static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
1338 {
1339         if (head)
1340                 srcu_read_unlock(&head->srcu, idx);
1341 }
1342
1343 static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
1344                 unsigned int cmd, unsigned long arg)
1345 {
1346         struct nvme_ns_head *head = NULL;
1347         void __user *argp = (void __user *)arg;
1348         struct nvme_ns *ns;
1349         int srcu_idx, ret;
1350
1351         ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
1352         if (unlikely(!ns))
1353                 return -EWOULDBLOCK;
1354
1355         /*
1356          * Handle ioctls that apply to the controller instead of the namespace
1357          * separately and drop the ns SRCU reference early.  This avoids a
1358          * deadlock when deleting namespaces using the passthrough interface.
1359          */
1360         if (cmd == NVME_IOCTL_ADMIN_CMD || is_sed_ioctl(cmd)) {
1361                 struct nvme_ctrl *ctrl = ns->ctrl;
1362
1363                 nvme_get_ctrl(ns->ctrl);
1364                 nvme_put_ns_from_disk(head, srcu_idx);
1365
1366                 if (cmd == NVME_IOCTL_ADMIN_CMD)
1367                         ret = nvme_user_cmd(ctrl, NULL, argp);
1368                 else
1369                         ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
1370
1371                 nvme_put_ctrl(ctrl);
1372                 return ret;
1373         }
1374
1375         switch (cmd) {
1376         case NVME_IOCTL_ID:
1377                 force_successful_syscall_return();
1378                 ret = ns->head->ns_id;
1379                 break;
1380         case NVME_IOCTL_IO_CMD:
1381                 ret = nvme_user_cmd(ns->ctrl, ns, argp);
1382                 break;
1383         case NVME_IOCTL_SUBMIT_IO:
1384                 ret = nvme_submit_io(ns, argp);
1385                 break;
1386         default:
1387                 if (ns->ndev)
1388                         ret = nvme_nvm_ioctl(ns, cmd, arg);
1389                 else
1390                         ret = -ENOTTY;
1391         }
1392
1393         nvme_put_ns_from_disk(head, srcu_idx);
1394         return ret;
1395 }
1396
1397 static int nvme_open(struct block_device *bdev, fmode_t mode)
1398 {
1399         struct nvme_ns *ns = bdev->bd_disk->private_data;
1400
1401 #ifdef CONFIG_NVME_MULTIPATH
1402         /* should never be called due to GENHD_FL_HIDDEN */
1403         if (WARN_ON_ONCE(ns->head->disk))
1404                 goto fail;
1405 #endif
1406         if (!kref_get_unless_zero(&ns->kref))
1407                 goto fail;
1408         if (!try_module_get(ns->ctrl->ops->module))
1409                 goto fail_put_ns;
1410
1411         return 0;
1412
1413 fail_put_ns:
1414         nvme_put_ns(ns);
1415 fail:
1416         return -ENXIO;
1417 }
1418
1419 static void nvme_release(struct gendisk *disk, fmode_t mode)
1420 {
1421         struct nvme_ns *ns = disk->private_data;
1422
1423         module_put(ns->ctrl->ops->module);
1424         nvme_put_ns(ns);
1425 }
1426
1427 static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1428 {
1429         /* some standard values */
1430         geo->heads = 1 << 6;
1431         geo->sectors = 1 << 5;
1432         geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
1433         return 0;
1434 }
1435
1436 #ifdef CONFIG_BLK_DEV_INTEGRITY
1437 static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
1438 {
1439         struct blk_integrity integrity;
1440
1441         memset(&integrity, 0, sizeof(integrity));
1442         switch (pi_type) {
1443         case NVME_NS_DPS_PI_TYPE3:
1444                 integrity.profile = &t10_pi_type3_crc;
1445                 integrity.tag_size = sizeof(u16) + sizeof(u32);
1446                 integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1447                 break;
1448         case NVME_NS_DPS_PI_TYPE1:
1449         case NVME_NS_DPS_PI_TYPE2:
1450                 integrity.profile = &t10_pi_type1_crc;
1451                 integrity.tag_size = sizeof(u16);
1452                 integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1453                 break;
1454         default:
1455                 integrity.profile = NULL;
1456                 break;
1457         }
1458         integrity.tuple_size = ms;
1459         blk_integrity_register(disk, &integrity);
1460         blk_queue_max_integrity_segments(disk->queue, 1);
1461 }
1462 #else
1463 static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
1464 {
1465 }
1466 #endif /* CONFIG_BLK_DEV_INTEGRITY */
1467
1468 static void nvme_set_chunk_size(struct nvme_ns *ns)
1469 {
1470         u32 chunk_size = (((u32)ns->noiob) << (ns->lba_shift - 9));
1471         blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
1472 }
1473
1474 static void nvme_config_discard(struct nvme_ns *ns)
1475 {
1476         struct nvme_ctrl *ctrl = ns->ctrl;
1477         struct request_queue *queue = ns->queue;
1478         u32 size = queue_logical_block_size(queue);
1479
1480         if (!(ctrl->oncs & NVME_CTRL_ONCS_DSM)) {
1481                 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue);
1482                 return;
1483         }
1484
1485         if (ctrl->nr_streams && ns->sws && ns->sgs)
1486                 size *= ns->sws * ns->sgs;
1487
1488         BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
1489                         NVME_DSM_MAX_RANGES);
1490
1491         queue->limits.discard_alignment = 0;
1492         queue->limits.discard_granularity = size;
1493
1494         /* If discard is already enabled, don't reset queue limits */
1495         if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue))
1496                 return;
1497
1498         blk_queue_max_discard_sectors(queue, UINT_MAX);
1499         blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);
1500
1501         if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
1502                 blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
1503 }
1504
1505 static void nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
1506                 struct nvme_id_ns *id, struct nvme_ns_ids *ids)
1507 {
1508         memset(ids, 0, sizeof(*ids));
1509
1510         if (ctrl->vs >= NVME_VS(1, 1, 0))
1511                 memcpy(ids->eui64, id->eui64, sizeof(id->eui64));
1512         if (ctrl->vs >= NVME_VS(1, 2, 0))
1513                 memcpy(ids->nguid, id->nguid, sizeof(id->nguid));
1514         if (ctrl->vs >= NVME_VS(1, 3, 0)) {
1515                  /* Don't treat error as fatal; we potentially
1516                   * already have an NGUID or EUI-64
1517                   */
1518                 if (nvme_identify_ns_descs(ctrl, nsid, ids))
1519                         dev_warn(ctrl->device,
1520                                  "%s: Identify Descriptors failed\n", __func__);
1521         }
1522 }
1523
1524 static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
1525 {
1526         return !uuid_is_null(&ids->uuid) ||
1527                 memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) ||
1528                 memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
1529 }
1530
1531 static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
1532 {
1533         return uuid_equal(&a->uuid, &b->uuid) &&
1534                 memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
1535                 memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0;
1536 }
1537
1538 static void nvme_update_disk_info(struct gendisk *disk,
1539                 struct nvme_ns *ns, struct nvme_id_ns *id)
1540 {
1541         sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9);
1542         unsigned short bs = 1 << ns->lba_shift;
1543
1544         if (ns->lba_shift > PAGE_SHIFT) {
1545                 /* unsupported block size, set capacity to 0 later */
1546                 bs = (1 << 9);
1547         }
1548         blk_mq_freeze_queue(disk->queue);
1549         blk_integrity_unregister(disk);
1550
1551         blk_queue_logical_block_size(disk->queue, bs);
1552         blk_queue_physical_block_size(disk->queue, bs);
1553         blk_queue_io_min(disk->queue, bs);
1554
1555         if (ns->ms && !ns->ext &&
1556             (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
1557                 nvme_init_integrity(disk, ns->ms, ns->pi_type);
1558         if ((ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk)) ||
1559             ns->lba_shift > PAGE_SHIFT)
1560                 capacity = 0;
1561
1562         set_capacity(disk, capacity);
1563         nvme_config_discard(ns);
1564
1565         if (id->nsattr & (1 << 0))
1566                 set_disk_ro(disk, true);
1567         else
1568                 set_disk_ro(disk, false);
1569
1570         blk_mq_unfreeze_queue(disk->queue);
1571 }
1572
1573 static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
1574 {
1575         struct nvme_ns *ns = disk->private_data;
1576
1577         /*
1578          * If Identify Namespace failed, use a default 512-byte block size so
1579          * the block layer can operate before reads and writes fail on the zero capacity.
1580          */
1581         ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
1582         if (ns->lba_shift == 0)
1583                 ns->lba_shift = 9;
1584         ns->noiob = le16_to_cpu(id->noiob);
1585         ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
1586         ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
1587         /* the PI implementation requires metadata equal to the t10 pi tuple size */
1588         if (ns->ms == sizeof(struct t10_pi_tuple))
1589                 ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
1590         else
1591                 ns->pi_type = 0;
1592
1593         if (ns->noiob)
1594                 nvme_set_chunk_size(ns);
1595         nvme_update_disk_info(disk, ns, id);
1596         if (ns->ndev)
1597                 nvme_nvm_update_nvm_info(ns);
1598 #ifdef CONFIG_NVME_MULTIPATH
1599         if (ns->head->disk) {
1600                 nvme_update_disk_info(ns->head->disk, ns, id);
1601                 blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
1602                 nvme_mpath_update_disk_size(ns->head->disk);
1603         }
1604 #endif
1605 }
1606
1607 static int nvme_revalidate_disk(struct gendisk *disk)
1608 {
1609         struct nvme_ns *ns = disk->private_data;
1610         struct nvme_ctrl *ctrl = ns->ctrl;
1611         struct nvme_id_ns *id;
1612         struct nvme_ns_ids ids;
1613         int ret = 0;
1614
1615         if (test_bit(NVME_NS_DEAD, &ns->flags)) {
1616                 set_capacity(disk, 0);
1617                 return -ENODEV;
1618         }
1619
1620         id = nvme_identify_ns(ctrl, ns->head->ns_id);
1621         if (!id)
1622                 return -ENODEV;
1623
1624         if (id->ncap == 0) {
1625                 ret = -ENODEV;
1626                 goto out;
1627         }
1628
1629         __nvme_revalidate_disk(disk, id);
1630         nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids);
1631         if (!nvme_ns_ids_equal(&ns->head->ids, &ids)) {
1632                 dev_err(ctrl->device,
1633                         "identifiers changed for nsid %d\n", ns->head->ns_id);
1634                 ret = -ENODEV;
1635         }
1636
1637 out:
1638         kfree(id);
1639         return ret;
1640 }
1641
1642 static char nvme_pr_type(enum pr_type type)
1643 {
1644         switch (type) {
1645         case PR_WRITE_EXCLUSIVE:
1646                 return 1;
1647         case PR_EXCLUSIVE_ACCESS:
1648                 return 2;
1649         case PR_WRITE_EXCLUSIVE_REG_ONLY:
1650                 return 3;
1651         case PR_EXCLUSIVE_ACCESS_REG_ONLY:
1652                 return 4;
1653         case PR_WRITE_EXCLUSIVE_ALL_REGS:
1654                 return 5;
1655         case PR_EXCLUSIVE_ACCESS_ALL_REGS:
1656                 return 6;
1657         default:
1658                 return 0;
1659         }
1660 }
1661
1662 static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
1663                                 u64 key, u64 sa_key, u8 op)
1664 {
1665         struct nvme_ns_head *head = NULL;
1666         struct nvme_ns *ns;
1667         struct nvme_command c;
1668         int srcu_idx, ret;
1669         u8 data[16] = { 0, };
1670
1671         ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
1672         if (unlikely(!ns))
1673                 return -EWOULDBLOCK;
1674
1675         put_unaligned_le64(key, &data[0]);
1676         put_unaligned_le64(sa_key, &data[8]);
1677
1678         memset(&c, 0, sizeof(c));
1679         c.common.opcode = op;
1680         c.common.nsid = cpu_to_le32(ns->head->ns_id);
1681         c.common.cdw10[0] = cpu_to_le32(cdw10);
1682
1683         ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
1684         nvme_put_ns_from_disk(head, srcu_idx);
1685         return ret;
1686 }
1687
1688 static int nvme_pr_register(struct block_device *bdev, u64 old,
1689                 u64 new, unsigned flags)
1690 {
1691         u32 cdw10;
1692
1693         if (flags & ~PR_FL_IGNORE_KEY)
1694                 return -EOPNOTSUPP;
1695
1696         cdw10 = old ? 2 : 0;
1697         cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
1698         cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
1699         return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
1700 }
1701
1702 static int nvme_pr_reserve(struct block_device *bdev, u64 key,
1703                 enum pr_type type, unsigned flags)
1704 {
1705         u32 cdw10;
1706
1707         if (flags & ~PR_FL_IGNORE_KEY)
1708                 return -EOPNOTSUPP;
1709
1710         cdw10 = nvme_pr_type(type) << 8;
1711         cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
1712         return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
1713 }
1714
1715 static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
1716                 enum pr_type type, bool abort)
1717 {
1718         u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
1719         return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
1720 }
1721
1722 static int nvme_pr_clear(struct block_device *bdev, u64 key)
1723 {
1724         u32 cdw10 = 1 | (key ? 1 << 3 : 0);
1725         return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
1726 }
1727
1728 static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
1729 {
1730         u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
1731         return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
1732 }
1733
1734 static const struct pr_ops nvme_pr_ops = {
1735         .pr_register    = nvme_pr_register,
1736         .pr_reserve     = nvme_pr_reserve,
1737         .pr_release     = nvme_pr_release,
1738         .pr_preempt     = nvme_pr_preempt,
1739         .pr_clear       = nvme_pr_clear,
1740 };
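
/*
 * Editor's note: these callbacks are reached through the generic persistent
 * reservation ioctls defined in include/uapi/linux/pr.h.  A rough userspace
 * sketch (illustrative only; the device path and keys are made up):
 *
 *	struct pr_registration reg = { .old_key = 0, .new_key = 0xABCD };
 *	struct pr_reservation rsv = { .key = 0xABCD, .type = PR_WRITE_EXCLUSIVE };
 *	int fd = open("/dev/nvme0n1", O_RDWR);
 *
 *	ioctl(fd, IOC_PR_REGISTER, &reg);   dispatches to nvme_pr_register()
 *	ioctl(fd, IOC_PR_RESERVE, &rsv);    dispatches to nvme_pr_reserve()
 */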
1741
1742 #ifdef CONFIG_BLK_SED_OPAL
1743 int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
1744                 bool send)
1745 {
1746         struct nvme_ctrl *ctrl = data;
1747         struct nvme_command cmd;
1748
1749         memset(&cmd, 0, sizeof(cmd));
1750         if (send)
1751                 cmd.common.opcode = nvme_admin_security_send;
1752         else
1753                 cmd.common.opcode = nvme_admin_security_recv;
1754         cmd.common.nsid = 0;
1755         cmd.common.cdw10[0] = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
1756         cmd.common.cdw10[1] = cpu_to_le32(len);
1757
1758         return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
1759                                       ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0);
1760 }
1761 EXPORT_SYMBOL_GPL(nvme_sec_submit);
1762 #endif /* CONFIG_BLK_SED_OPAL */
1763
1764 static const struct block_device_operations nvme_fops = {
1765         .owner          = THIS_MODULE,
1766         .ioctl          = nvme_ioctl,
1767         .compat_ioctl   = nvme_ioctl,
1768         .open           = nvme_open,
1769         .release        = nvme_release,
1770         .getgeo         = nvme_getgeo,
1771         .revalidate_disk= nvme_revalidate_disk,
1772         .pr_ops         = &nvme_pr_ops,
1773 };
1774
1775 #ifdef CONFIG_NVME_MULTIPATH
1776 static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
1777 {
1778         struct nvme_ns_head *head = bdev->bd_disk->private_data;
1779
1780         if (!kref_get_unless_zero(&head->ref))
1781                 return -ENXIO;
1782         return 0;
1783 }
1784
1785 static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
1786 {
1787         nvme_put_ns_head(disk->private_data);
1788 }
1789
1790 const struct block_device_operations nvme_ns_head_ops = {
1791         .owner          = THIS_MODULE,
1792         .open           = nvme_ns_head_open,
1793         .release        = nvme_ns_head_release,
1794         .ioctl          = nvme_ioctl,
1795         .compat_ioctl   = nvme_ioctl,
1796         .getgeo         = nvme_getgeo,
1797         .pr_ops         = &nvme_pr_ops,
1798 };
1799 #endif /* CONFIG_NVME_MULTIPATH */
1800
1801 static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
1802 {
1803         unsigned long timeout =
1804                 ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
1805         u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
1806         int ret;
1807
1808         while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
1809                 if (csts == ~0)
1810                         return -ENODEV;
1811                 if ((csts & NVME_CSTS_RDY) == bit)
1812                         break;
1813
1814                 msleep(100);
1815                 if (fatal_signal_pending(current))
1816                         return -EINTR;
1817                 if (time_after(jiffies, timeout)) {
1818                         dev_err(ctrl->device,
1819                                 "Device not ready; aborting %s\n", enabled ?
1820                                                 "initialisation" : "reset");
1821                         return -ENODEV;
1822                 }
1823         }
1824
1825         return ret;
1826 }
1827
1828 /*
1829  * If the device has been passed off to us in an enabled state, just clear
1830  * the enabled bit.  The spec says we should set the 'shutdown notification
1831  * bits', but doing so may cause the device to complete commands to the
1832  * admin queue ... and we don't know what memory that might be pointing at!
1833  */
1834 int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
1835 {
1836         int ret;
1837
1838         ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
1839         ctrl->ctrl_config &= ~NVME_CC_ENABLE;
1840
1841         ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
1842         if (ret)
1843                 return ret;
1844
1845         if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
1846                 msleep(NVME_QUIRK_DELAY_AMOUNT);
1847
1848         return nvme_wait_ready(ctrl, cap, false);
1849 }
1850 EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
1851
1852 int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
1853 {
1854         /*
1855          * Default to a 4K page size, with the intention to update this
1856          * path in the future to accommodate architectures with differing
1857          * kernel and IO page sizes.
1858          */
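        /*
         * Editor's note, illustrative numbers: with page_shift = 12 the
         * CC.MPS field set below is 12 - 12 = 0, which the spec defines as a
         * memory page size of 2^(12 + 0) = 4096 bytes.  A controller whose
         * CAP.MPSMIN is 1 needs at least 2^(12 + 1) = 8192-byte pages and is
         * rejected by the check below.
         */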
1859         unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12, page_shift = 12;
1860         int ret;
1861
1862         if (page_shift < dev_page_min) {
1863                 dev_err(ctrl->device,
1864                         "Minimum device page size %u too large for host (%u)\n",
1865                         1 << dev_page_min, 1 << page_shift);
1866                 return -ENODEV;
1867         }
1868
1869         ctrl->page_size = 1 << page_shift;
1870
1871         ctrl->ctrl_config = NVME_CC_CSS_NVM;
1872         ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
1873         ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
1874         ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
1875         ctrl->ctrl_config |= NVME_CC_ENABLE;
1876
1877         ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
1878         if (ret)
1879                 return ret;
1880         return nvme_wait_ready(ctrl, cap, true);
1881 }
1882 EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
1883
1884 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
1885 {
1886         unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ);
1887         u32 csts;
1888         int ret;
1889
1890         ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
1891         ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
1892
1893         ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
1894         if (ret)
1895                 return ret;
1896
1897         while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
1898                 if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
1899                         break;
1900
1901                 msleep(100);
1902                 if (fatal_signal_pending(current))
1903                         return -EINTR;
1904                 if (time_after(jiffies, timeout)) {
1905                         dev_err(ctrl->device,
1906                                 "Device shutdown incomplete; abort shutdown\n");
1907                         return -ENODEV;
1908                 }
1909         }
1910
1911         return ret;
1912 }
1913 EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
1914
1915 static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
1916                 struct request_queue *q)
1917 {
1918         bool vwc = false;
1919
1920         if (ctrl->max_hw_sectors) {
1921                 u32 max_segments =
1922                         (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
1923
1924                 max_segments = min_not_zero(max_segments, ctrl->max_segments);
1925                 blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
1926                 blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
1927         }
1928         if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
1929             is_power_of_2(ctrl->max_hw_sectors))
1930                 blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
1931         blk_queue_virt_boundary(q, ctrl->page_size - 1);
1932         if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
1933                 vwc = true;
1934         blk_queue_write_cache(q, vwc, vwc);
1935 }
1936
1937 static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
1938 {
1939         __le64 ts;
1940         int ret;
1941
1942         if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
1943                 return 0;
1944
1945         ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
1946         ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts),
1947                         NULL);
1948         if (ret)
1949                 dev_warn_once(ctrl->device,
1950                         "could not set timestamp (%d)\n", ret);
1951         return ret;
1952 }
1953
1954 static int nvme_configure_apst(struct nvme_ctrl *ctrl)
1955 {
1956         /*
1957          * APST (Autonomous Power State Transition) lets us program a
1958          * table of power state transitions that the controller will
1959          * perform automatically.  We configure it with a simple
1960          * heuristic: we are willing to spend at most 2% of the time
1961          * transitioning between power states.  Therefore, when running
1962          * in any given state, we will enter the next lower-power
1963          * non-operational state after waiting 50 * (enlat + exlat)
1964          * microseconds, as long as that state's exit latency is under
1965          * the requested maximum latency.
1966          *
1967          * We will not autonomously enter any non-operational state for
1968          * which the total latency exceeds ps_max_latency_us.  Users
1969          * can set ps_max_latency_us to zero to turn off APST.
1970          */
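        /*
         * Editor's note, worked example with hypothetical latencies: a
         * non-operational state with entry latency 5000us and exit latency
         * 5000us has a total latency of 10000us, so the idle timeout
         * programmed below is 50 * 10000us = 500ms ((10000 + 19) / 20 in the
         * millisecond units of the APST table entry), and the state is only
         * eligible while its 5000us exit latency is within ps_max_latency_us.
         */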
1971
1972         unsigned apste;
1973         struct nvme_feat_auto_pst *table;
1974         u64 max_lat_us = 0;
1975         int max_ps = -1;
1976         int ret;
1977
1978         /*
1979          * If APST isn't supported or if we haven't been initialized yet,
1980          * then don't do anything.
1981          */
1982         if (!ctrl->apsta)
1983                 return 0;
1984
1985         if (ctrl->npss > 31) {
1986                 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
1987                 return 0;
1988         }
1989
1990         table = kzalloc(sizeof(*table), GFP_KERNEL);
1991         if (!table)
1992                 return 0;
1993
1994         if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
1995                 /* Turn off APST. */
1996                 apste = 0;
1997                 dev_dbg(ctrl->device, "APST disabled\n");
1998         } else {
1999                 __le64 target = cpu_to_le64(0);
2000                 int state;
2001
2002                 /*
2003                  * Walk through all states from lowest- to highest-power.
2004                  * According to the spec, lower-numbered states use more
2005                  * power.  NPSS, despite the name, is the index of the
2006                  * lowest-power state, not the number of states.
2007                  */
2008                 for (state = (int)ctrl->npss; state >= 0; state--) {
2009                         u64 total_latency_us, exit_latency_us, transition_ms;
2010
2011                         if (target)
2012                                 table->entries[state] = target;
2013
2014                         /*
2015                          * Don't allow transitions to the deepest state
2016                          * if it's quirked off.
2017                          */
2018                         if (state == ctrl->npss &&
2019                             (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
2020                                 continue;
2021
2022                         /*
2023                          * Is this state a useful non-operational state for
2024                          * higher-power states to autonomously transition to?
2025                          */
2026                         if (!(ctrl->psd[state].flags &
2027                               NVME_PS_FLAGS_NON_OP_STATE))
2028                                 continue;
2029
2030                         exit_latency_us =
2031                                 (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
2032                         if (exit_latency_us > ctrl->ps_max_latency_us)
2033                                 continue;
2034
2035                         total_latency_us =
2036                                 exit_latency_us +
2037                                 le32_to_cpu(ctrl->psd[state].entry_lat);
2038
2039                         /*
2040                          * This state is good.  Use it as the APST idle
2041                          * target for higher power states.
2042                          */
2043                         transition_ms = total_latency_us + 19;
2044                         do_div(transition_ms, 20);
2045                         if (transition_ms > (1 << 24) - 1)
2046                                 transition_ms = (1 << 24) - 1;
2047
2048                         target = cpu_to_le64((state << 3) |
2049                                              (transition_ms << 8));
2050
2051                         if (max_ps == -1)
2052                                 max_ps = state;
2053
2054                         if (total_latency_us > max_lat_us)
2055                                 max_lat_us = total_latency_us;
2056                 }
2057
2058                 apste = 1;
2059
2060                 if (max_ps == -1) {
2061                         dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
2062                 } else {
2063                         dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
2064                                 max_ps, max_lat_us, (int)sizeof(*table), table);
2065                 }
2066         }
2067
2068         ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
2069                                 table, sizeof(*table), NULL);
2070         if (ret)
2071                 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
2072
2073         kfree(table);
2074         return ret;
2075 }
2076
2077 static void nvme_set_latency_tolerance(struct device *dev, s32 val)
2078 {
2079         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2080         u64 latency;
2081
2082         switch (val) {
2083         case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
2084         case PM_QOS_LATENCY_ANY:
2085                 latency = U64_MAX;
2086                 break;
2087
2088         default:
2089                 latency = val;
2090         }
2091
2092         if (ctrl->ps_max_latency_us != latency) {
2093                 ctrl->ps_max_latency_us = latency;
2094                 if (ctrl->state == NVME_CTRL_LIVE)
2095                         nvme_configure_apst(ctrl);
2096         }
2097 }
2098
2099 struct nvme_core_quirk_entry {
2100         /*
2101          * NVMe model and firmware strings are padded with spaces.  For
2102          * simplicity, strings in the quirk table are padded with NULLs
2103          * instead.
2104          */
2105         u16 vid;
2106         const char *mn;
2107         const char *fr;
2108         unsigned long quirks;
2109 };
2110
2111 static const struct nvme_core_quirk_entry core_quirks[] = {
2112         {
2113                 /*
2114                  * This Toshiba device seems to die when using any APST state.  See:
2115                  * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
2116                  */
2117                 .vid = 0x1179,
2118                 .mn = "THNSF5256GPUK TOSHIBA",
2119                 .quirks = NVME_QUIRK_NO_APST,
2120         }
2121 };
2122
2123 /* match is null-terminated but idstr is space-padded. */
2124 static bool string_matches(const char *idstr, const char *match, size_t len)
2125 {
2126         size_t matchlen;
2127
2128         if (!match)
2129                 return true;
2130
2131         matchlen = strlen(match);
2132         WARN_ON_ONCE(matchlen > len);
2133
2134         if (memcmp(idstr, match, matchlen))
2135                 return false;
2136
2137         for (; matchlen < len; matchlen++)
2138                 if (idstr[matchlen] != ' ')
2139                         return false;
2140
2141         return true;
2142 }
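
/*
 * Editor's note, example: the Toshiba entry above stores the NUL-terminated
 * model string "THNSF5256GPUK TOSHIBA", while the controller reports the
 * same characters followed by spaces up to the 40-byte mn field.
 * string_matches() accepts the pair because the prefixes are identical and
 * every remaining byte of the space-padded identify string is a space.
 */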
2143
2144 static bool quirk_matches(const struct nvme_id_ctrl *id,
2145                           const struct nvme_core_quirk_entry *q)
2146 {
2147         return q->vid == le16_to_cpu(id->vid) &&
2148                 string_matches(id->mn, q->mn, sizeof(id->mn)) &&
2149                 string_matches(id->fr, q->fr, sizeof(id->fr));
2150 }
2151
2152 static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
2153                 struct nvme_id_ctrl *id)
2154 {
2155         size_t nqnlen;
2156         int off;
2157
2158         nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
2159         if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
2160                 strncpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
2161                 return;
2162         }
2163
2164         if (ctrl->vs >= NVME_VS(1, 2, 1))
2165                 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
2166
2167         /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
2168         off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
2169                         "nqn.2014.08.org.nvmexpress:%04x%04x",
2170                         le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
2171         memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
2172         off += sizeof(id->sn);
2173         memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
2174         off += sizeof(id->mn);
2175         memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
2176 }
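
/*
 * Editor's note, example with made-up identify data: vid = 0x8086 and
 * ssvid = 0x8086 produce the prefix "nqn.2014.08.org.nvmexpress:80868086",
 * after which the 20-byte sn and 40-byte mn fields are appended verbatim
 * (trailing spaces included) and the remainder of subnqn is zero-filled up
 * to NVMF_NQN_SIZE.
 */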
2177
2178 static void __nvme_release_subsystem(struct nvme_subsystem *subsys)
2179 {
2180         ida_simple_remove(&nvme_subsystems_ida, subsys->instance);
2181         kfree(subsys);
2182 }
2183
2184 static void nvme_release_subsystem(struct device *dev)
2185 {
2186         __nvme_release_subsystem(container_of(dev, struct nvme_subsystem, dev));
2187 }
2188
2189 static void nvme_destroy_subsystem(struct kref *ref)
2190 {
2191         struct nvme_subsystem *subsys =
2192                         container_of(ref, struct nvme_subsystem, ref);
2193
2194         mutex_lock(&nvme_subsystems_lock);
2195         list_del(&subsys->entry);
2196         mutex_unlock(&nvme_subsystems_lock);
2197
2198         ida_destroy(&subsys->ns_ida);
2199         device_del(&subsys->dev);
2200         put_device(&subsys->dev);
2201 }
2202
2203 static void nvme_put_subsystem(struct nvme_subsystem *subsys)
2204 {
2205         kref_put(&subsys->ref, nvme_destroy_subsystem);
2206 }
2207
2208 static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
2209 {
2210         struct nvme_subsystem *subsys;
2211
2212         lockdep_assert_held(&nvme_subsystems_lock);
2213
2214         /*
2215          * Fail matches for discovery subsystems. This results in each
2216          * discovery controller being bound to a unique subsystem.
2217          * This avoids issues with validating controller values
2218          * that can only be true when there is a single unique subsystem.
2219          * There may be multiple and completely independent entities
2220          * that provide discovery controllers.
2221          */
2222         if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME))
2223                 return NULL;
2224
2225         list_for_each_entry(subsys, &nvme_subsystems, entry) {
2226                 if (strcmp(subsys->subnqn, subsysnqn))
2227                         continue;
2228                 if (!kref_get_unless_zero(&subsys->ref))
2229                         continue;
2230                 return subsys;
2231         }
2232
2233         return NULL;
2234 }
2235
2236 #define SUBSYS_ATTR_RO(_name, _mode, _show)                     \
2237         struct device_attribute subsys_attr_##_name = \
2238                 __ATTR(_name, _mode, _show, NULL)
2239
2240 static ssize_t nvme_subsys_show_nqn(struct device *dev,
2241                                     struct device_attribute *attr,
2242                                     char *buf)
2243 {
2244         struct nvme_subsystem *subsys =
2245                 container_of(dev, struct nvme_subsystem, dev);
2246
2247         return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn);
2248 }
2249 static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
2250
2251 #define nvme_subsys_show_str_function(field)                            \
2252 static ssize_t subsys_##field##_show(struct device *dev,                \
2253                             struct device_attribute *attr, char *buf)   \
2254 {                                                                       \
2255         struct nvme_subsystem *subsys =                                 \
2256                 container_of(dev, struct nvme_subsystem, dev);          \
2257         return sprintf(buf, "%.*s\n",                                   \
2258                        (int)sizeof(subsys->field), subsys->field);      \
2259 }                                                                       \
2260 static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);
2261
2262 nvme_subsys_show_str_function(model);
2263 nvme_subsys_show_str_function(serial);
2264 nvme_subsys_show_str_function(firmware_rev);
2265
2266 static struct attribute *nvme_subsys_attrs[] = {
2267         &subsys_attr_model.attr,
2268         &subsys_attr_serial.attr,
2269         &subsys_attr_firmware_rev.attr,
2270         &subsys_attr_subsysnqn.attr,
2271         NULL,
2272 };
2273
2274 static struct attribute_group nvme_subsys_attrs_group = {
2275         .attrs = nvme_subsys_attrs,
2276 };
2277
2278 static const struct attribute_group *nvme_subsys_attrs_groups[] = {
2279         &nvme_subsys_attrs_group,
2280         NULL,
2281 };
2282
2283 static int nvme_active_ctrls(struct nvme_subsystem *subsys)
2284 {
2285         int count = 0;
2286         struct nvme_ctrl *ctrl;
2287
2288         mutex_lock(&subsys->lock);
2289         list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
2290                 if (ctrl->state != NVME_CTRL_DELETING &&
2291                     ctrl->state != NVME_CTRL_DEAD)
2292                         count++;
2293         }
2294         mutex_unlock(&subsys->lock);
2295
2296         return count;
2297 }
2298
2299 static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2300 {
2301         struct nvme_subsystem *subsys, *found;
2302         int ret;
2303
2304         subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
2305         if (!subsys)
2306                 return -ENOMEM;
2307         ret = ida_simple_get(&nvme_subsystems_ida, 0, 0, GFP_KERNEL);
2308         if (ret < 0) {
2309                 kfree(subsys);
2310                 return ret;
2311         }
2312         subsys->instance = ret;
2313         mutex_init(&subsys->lock);
2314         kref_init(&subsys->ref);
2315         INIT_LIST_HEAD(&subsys->ctrls);
2316         INIT_LIST_HEAD(&subsys->nsheads);
2317         nvme_init_subnqn(subsys, ctrl, id);
2318         memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
2319         memcpy(subsys->model, id->mn, sizeof(subsys->model));
2320         memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
2321         subsys->vendor_id = le16_to_cpu(id->vid);
2322         subsys->cmic = id->cmic;
2323
2324         subsys->dev.class = nvme_subsys_class;
2325         subsys->dev.release = nvme_release_subsystem;
2326         subsys->dev.groups = nvme_subsys_attrs_groups;
2327         dev_set_name(&subsys->dev, "nvme-subsys%d", subsys->instance);
2328         device_initialize(&subsys->dev);
2329
2330         mutex_lock(&nvme_subsystems_lock);
2331         found = __nvme_find_get_subsystem(subsys->subnqn);
2332         if (found) {
2333                 /*
2334                  * Verify that the subsystem actually supports multiple
2335                  * controllers, else bail out.
2336                  */
2337                 if (!(ctrl->opts && ctrl->opts->discovery_nqn) &&
2338                     nvme_active_ctrls(found) && !(id->cmic & (1 << 1))) {
2339                         dev_err(ctrl->device,
2340                                 "ignoring ctrl due to duplicate subnqn (%s).\n",
2341                                 found->subnqn);
2342                         nvme_put_subsystem(found);
2343                         ret = -EINVAL;
2344                         goto out_unlock;
2345                 }
2346
2347                 __nvme_release_subsystem(subsys);
2348                 subsys = found;
2349         } else {
2350                 ret = device_add(&subsys->dev);
2351                 if (ret) {
2352                         dev_err(ctrl->device,
2353                                 "failed to register subsystem device.\n");
2354                         goto out_unlock;
2355                 }
2356                 ida_init(&subsys->ns_ida);
2357                 list_add_tail(&subsys->entry, &nvme_subsystems);
2358         }
2359
2360         ctrl->subsys = subsys;
2361         mutex_unlock(&nvme_subsystems_lock);
2362
2363         if (sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
2364                         dev_name(ctrl->device))) {
2365                 dev_err(ctrl->device,
2366                         "failed to create sysfs link from subsystem.\n");
2367                 /* the transport driver will eventually put the subsystem */
2368                 return -EINVAL;
2369         }
2370
2371         mutex_lock(&subsys->lock);
2372         list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
2373         mutex_unlock(&subsys->lock);
2374
2375         return 0;
2376
2377 out_unlock:
2378         mutex_unlock(&nvme_subsystems_lock);
2379         put_device(&subsys->dev);
2380         return ret;
2381 }
2382
2383 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
2384                 void *log, size_t size, u64 offset)
2385 {
2386         struct nvme_command c = { };
2387         unsigned long dwlen = size / 4 - 1;
2388
2389         c.get_log_page.opcode = nvme_admin_get_log_page;
2390         c.get_log_page.nsid = cpu_to_le32(nsid);
2391         c.get_log_page.lid = log_page;
2392         c.get_log_page.lsp = lsp;
2393         c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
2394         c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
2395         c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
2396         c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
2397
2398         return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
2399 }
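
/*
 * Editor's note, worked example: reading a 16384-byte log transfers 4096
 * dwords, encoded zero-based as dwlen = 4095 = 0x0fff, so numdl = 0x0fff
 * and numdu = 0x0000.  A 1 MiB read is 262144 dwords, dwlen = 262143 =
 * 0x3ffff, giving numdl = 0xffff and numdu = 0x0003.
 */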
2400
2401 static int nvme_get_effects_log(struct nvme_ctrl *ctrl)
2402 {
2403         int ret;
2404
2405         if (!ctrl->effects)
2406                 ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL);
2407
2408         if (!ctrl->effects)
2409                 return 0;
2410
2411         ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0,
2412                         ctrl->effects, sizeof(*ctrl->effects), 0);
2413         if (ret) {
2414                 kfree(ctrl->effects);
2415                 ctrl->effects = NULL;
2416         }
2417         return ret;
2418 }
2419
2420 /*
2421  * Initialize the cached copies of the Identify data and various controller
2422  * registers in our nvme_ctrl structure.  This should be called as soon as
2423  * the admin queue is fully up and running.
2424  */
2425 int nvme_init_identify(struct nvme_ctrl *ctrl)
2426 {
2427         struct nvme_id_ctrl *id;
2428         u64 cap;
2429         int ret, page_shift;
2430         u32 max_hw_sectors;
2431         bool prev_apst_enabled;
2432
2433         ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
2434         if (ret) {
2435                 dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
2436                 return ret;
2437         }
2438
2439         ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
2440         if (ret) {
2441                 dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
2442                 return ret;
2443         }
2444         page_shift = NVME_CAP_MPSMIN(cap) + 12;
2445
2446         if (ctrl->vs >= NVME_VS(1, 1, 0))
2447                 ctrl->subsystem = NVME_CAP_NSSRC(cap);
2448
2449         ret = nvme_identify_ctrl(ctrl, &id);
2450         if (ret) {
2451                 dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
2452                 return -EIO;
2453         }
2454
2455         if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
2456                 ret = nvme_get_effects_log(ctrl);
2457                 if (ret < 0)
2458                         goto out_free;
2459         }
2460
2461         if (!ctrl->identified) {
2462                 int i;
2463
2464                 ret = nvme_init_subsystem(ctrl, id);
2465                 if (ret)
2466                         goto out_free;
2467
2468                 /*
2469                  * Check for quirks.  Quirks can depend on the firmware version,
2470                  * so, in principle, the set of quirks present can change
2471                  * across a reset.  As a possible future enhancement, we
2472                  * could re-scan for quirks every time we reinitialize
2473                  * the device, but we'd have to make sure that the driver
2474                  * behaves intelligently if the quirks change.
2475                  */
2476                 for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
2477                         if (quirk_matches(id, &core_quirks[i]))
2478                                 ctrl->quirks |= core_quirks[i].quirks;
2479                 }
2480         }
2481
2482         if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
2483                 dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
2484                 ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
2485         }
2486
2487         ctrl->oacs = le16_to_cpu(id->oacs);
2488         ctrl->oncs = le16_to_cpup(&id->oncs);
2489         ctrl->oaes = le32_to_cpu(id->oaes);
2490         atomic_set(&ctrl->abort_limit, id->acl + 1);
2491         ctrl->vwc = id->vwc;
2492         ctrl->cntlid = le16_to_cpup(&id->cntlid);
2493         if (id->mdts)
2494                 max_hw_sectors = 1 << (id->mdts + page_shift - 9);
2495         else
2496                 max_hw_sectors = UINT_MAX;
2497         ctrl->max_hw_sectors =
2498                 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
2499
2500         nvme_set_queue_limits(ctrl, ctrl->admin_q);
2501         ctrl->sgls = le32_to_cpu(id->sgls);
2502         ctrl->kas = le16_to_cpu(id->kas);
2503         ctrl->max_namespaces = le32_to_cpu(id->mnan);
2504
2505         if (id->rtd3e) {
2506                 /* us -> s */
2507                 u32 transition_time = le32_to_cpu(id->rtd3e) / 1000000;
2508
2509                 ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
2510                                                  shutdown_timeout, 60);
2511
2512                 if (ctrl->shutdown_timeout != shutdown_timeout)
2513                         dev_info(ctrl->device,
2514                                  "Shutdown timeout set to %u seconds\n",
2515                                  ctrl->shutdown_timeout);
2516         } else
2517                 ctrl->shutdown_timeout = shutdown_timeout;
2518
2519         ctrl->npss = id->npss;
2520         ctrl->apsta = id->apsta;
2521         prev_apst_enabled = ctrl->apst_enabled;
2522         if (ctrl->quirks & NVME_QUIRK_NO_APST) {
2523                 if (force_apst && id->apsta) {
2524                         dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
2525                         ctrl->apst_enabled = true;
2526                 } else {
2527                         ctrl->apst_enabled = false;
2528                 }
2529         } else {
2530                 ctrl->apst_enabled = id->apsta;
2531         }
2532         memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));
2533
2534         if (ctrl->ops->flags & NVME_F_FABRICS) {
2535                 ctrl->icdoff = le16_to_cpu(id->icdoff);
2536                 ctrl->ioccsz = le32_to_cpu(id->ioccsz);
2537                 ctrl->iorcsz = le32_to_cpu(id->iorcsz);
2538                 ctrl->maxcmd = le16_to_cpu(id->maxcmd);
2539
2540                 /*
2541                  * In fabrics we need to verify that the cntlid from Identify
2542                  * Controller matches the one established during admin connect.
2543                  */
2544                 if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
2545                         ret = -EINVAL;
2546                         goto out_free;
2547                 }
2548
2549                 if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
2550                         dev_err(ctrl->device,
2551                                 "keep-alive support is mandatory for fabrics\n");
2552                         ret = -EINVAL;
2553                         goto out_free;
2554                 }
2555         } else {
2556                 ctrl->cntlid = le16_to_cpu(id->cntlid);
2557                 ctrl->hmpre = le32_to_cpu(id->hmpre);
2558                 ctrl->hmmin = le32_to_cpu(id->hmmin);
2559                 ctrl->hmminds = le32_to_cpu(id->hmminds);
2560                 ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
2561         }
2562
2563         ret = nvme_mpath_init(ctrl, id);
2564         kfree(id);
2565
2566         if (ret < 0)
2567                 return ret;
2568
2569         if (ctrl->apst_enabled && !prev_apst_enabled)
2570                 dev_pm_qos_expose_latency_tolerance(ctrl->device);
2571         else if (!ctrl->apst_enabled && prev_apst_enabled)
2572                 dev_pm_qos_hide_latency_tolerance(ctrl->device);
2573
2574         ret = nvme_configure_apst(ctrl);
2575         if (ret < 0)
2576                 return ret;
2577
2578         ret = nvme_configure_timestamp(ctrl);
2579         if (ret < 0)
2580                 return ret;
2581
2582         ret = nvme_configure_directives(ctrl);
2583         if (ret < 0)
2584                 return ret;
2585
2586         ctrl->identified = true;
2587
2588         return 0;
2589
2590 out_free:
2591         kfree(id);
2592         return ret;
2593 }
2594 EXPORT_SYMBOL_GPL(nvme_init_identify);
2595
2596 static int nvme_dev_open(struct inode *inode, struct file *file)
2597 {
2598         struct nvme_ctrl *ctrl =
2599                 container_of(inode->i_cdev, struct nvme_ctrl, cdev);
2600
2601         switch (ctrl->state) {
2602         case NVME_CTRL_LIVE:
2603         case NVME_CTRL_ADMIN_ONLY:
2604                 break;
2605         default:
2606                 return -EWOULDBLOCK;
2607         }
2608
2609         nvme_get_ctrl(ctrl);
2610         if (!try_module_get(ctrl->ops->module)) {
2611                 nvme_put_ctrl(ctrl);
2612                 return -EINVAL;
2613         }
2614
2615         file->private_data = ctrl;
2616         return 0;
2617 }
2618
2619 static int nvme_dev_release(struct inode *inode, struct file *file)
2620 {
2621         struct nvme_ctrl *ctrl =
2622                 container_of(inode->i_cdev, struct nvme_ctrl, cdev);
2623
2624         module_put(ctrl->ops->module);
2625         nvme_put_ctrl(ctrl);
2626         return 0;
2627 }
2628
2629 static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
2630 {
2631         struct nvme_ns *ns;
2632         int ret;
2633
2634         down_read(&ctrl->namespaces_rwsem);
2635         if (list_empty(&ctrl->namespaces)) {
2636                 ret = -ENOTTY;
2637                 goto out_unlock;
2638         }
2639
2640         ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
2641         if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
2642                 dev_warn(ctrl->device,
2643                         "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
2644                 ret = -EINVAL;
2645                 goto out_unlock;
2646         }
2647
2648         dev_warn(ctrl->device,
2649                 "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
2650         kref_get(&ns->kref);
2651         up_read(&ctrl->namespaces_rwsem);
2652
2653         ret = nvme_user_cmd(ctrl, ns, argp);
2654         nvme_put_ns(ns);
2655         return ret;
2656
2657 out_unlock:
2658         up_read(&ctrl->namespaces_rwsem);
2659         return ret;
2660 }
2661
2662 static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
2663                 unsigned long arg)
2664 {
2665         struct nvme_ctrl *ctrl = file->private_data;
2666         void __user *argp = (void __user *)arg;
2667
2668         switch (cmd) {
2669         case NVME_IOCTL_ADMIN_CMD:
2670                 return nvme_user_cmd(ctrl, NULL, argp);
2671         case NVME_IOCTL_IO_CMD:
2672                 return nvme_dev_user_cmd(ctrl, argp);
2673         case NVME_IOCTL_RESET:
2674                 dev_warn(ctrl->device, "resetting controller\n");
2675                 return nvme_reset_ctrl_sync(ctrl);
2676         case NVME_IOCTL_SUBSYS_RESET:
2677                 return nvme_reset_subsystem(ctrl);
2678         case NVME_IOCTL_RESCAN:
2679                 nvme_queue_scan(ctrl);
2680                 return 0;
2681         default:
2682                 return -ENOTTY;
2683         }
2684 }
2685
2686 static const struct file_operations nvme_dev_fops = {
2687         .owner          = THIS_MODULE,
2688         .open           = nvme_dev_open,
2689         .release        = nvme_dev_release,
2690         .unlocked_ioctl = nvme_dev_ioctl,
2691         .compat_ioctl   = nvme_dev_ioctl,
2692 };
2693
2694 static ssize_t nvme_sysfs_reset(struct device *dev,
2695                                 struct device_attribute *attr, const char *buf,
2696                                 size_t count)
2697 {
2698         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2699         int ret;
2700
2701         ret = nvme_reset_ctrl_sync(ctrl);
2702         if (ret < 0)
2703                 return ret;
2704         return count;
2705 }
2706 static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
2707
2708 static ssize_t nvme_sysfs_rescan(struct device *dev,
2709                                 struct device_attribute *attr, const char *buf,
2710                                 size_t count)
2711 {
2712         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2713
2714         nvme_queue_scan(ctrl);
2715         return count;
2716 }
2717 static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
2718
2719 static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
2720 {
2721         struct gendisk *disk = dev_to_disk(dev);
2722
2723         if (disk->fops == &nvme_fops)
2724                 return nvme_get_ns_from_dev(dev)->head;
2725         else
2726                 return disk->private_data;
2727 }
2728
2729 static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
2730                 char *buf)
2731 {
2732         struct nvme_ns_head *head = dev_to_ns_head(dev);
2733         struct nvme_ns_ids *ids = &head->ids;
2734         struct nvme_subsystem *subsys = head->subsys;
2735         int serial_len = sizeof(subsys->serial);
2736         int model_len = sizeof(subsys->model);
2737
2738         if (!uuid_is_null(&ids->uuid))
2739                 return sprintf(buf, "uuid.%pU\n", &ids->uuid);
2740
2741         if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
2742                 return sprintf(buf, "eui.%16phN\n", ids->nguid);
2743
2744         if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
2745                 return sprintf(buf, "eui.%8phN\n", ids->eui64);
2746
2747         while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
2748                                   subsys->serial[serial_len - 1] == '\0'))
2749                 serial_len--;
2750         while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
2751                                  subsys->model[model_len - 1] == '\0'))
2752                 model_len--;
2753
2754         return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
2755                 serial_len, subsys->serial, model_len, subsys->model,
2756                 head->ns_id);
2757 }
2758 static DEVICE_ATTR_RO(wwid);
2759
2760 static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
2761                 char *buf)
2762 {
2763         return sprintf(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
2764 }
2765 static DEVICE_ATTR_RO(nguid);
2766
2767 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
2768                 char *buf)
2769 {
2770         struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
2771
2772         /* For backward compatibility, expose the NGUID to userspace if
2773          * we have no UUID set
2774          */
2775         if (uuid_is_null(&ids->uuid)) {
2776                 printk_ratelimited(KERN_WARNING
2777                                    "No UUID available providing old NGUID\n");
2778                 return sprintf(buf, "%pU\n", ids->nguid);
2779         }
2780         return sprintf(buf, "%pU\n", &ids->uuid);
2781 }
2782 static DEVICE_ATTR_RO(uuid);
2783
2784 static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
2785                 char *buf)
2786 {
2787         return sprintf(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
2788 }
2789 static DEVICE_ATTR_RO(eui);
2790
2791 static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
2792                 char *buf)
2793 {
2794         return sprintf(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
2795 }
2796 static DEVICE_ATTR_RO(nsid);
2797
2798 static struct attribute *nvme_ns_id_attrs[] = {
2799         &dev_attr_wwid.attr,
2800         &dev_attr_uuid.attr,
2801         &dev_attr_nguid.attr,
2802         &dev_attr_eui.attr,
2803         &dev_attr_nsid.attr,
2804 #ifdef CONFIG_NVME_MULTIPATH
2805         &dev_attr_ana_grpid.attr,
2806         &dev_attr_ana_state.attr,
2807 #endif
2808         NULL,
2809 };
2810
2811 static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
2812                 struct attribute *a, int n)
2813 {
2814         struct device *dev = container_of(kobj, struct device, kobj);
2815         struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
2816
2817         if (a == &dev_attr_uuid.attr) {
2818                 if (uuid_is_null(&ids->uuid) &&
2819                     !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
2820                         return 0;
2821         }
2822         if (a == &dev_attr_nguid.attr) {
2823                 if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
2824                         return 0;
2825         }
2826         if (a == &dev_attr_eui.attr) {
2827                 if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
2828                         return 0;
2829         }
2830 #ifdef CONFIG_NVME_MULTIPATH
2831         if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
2832                 if (dev_to_disk(dev)->fops != &nvme_fops) /* per-path attr */
2833                         return 0;
2834                 if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
2835                         return 0;
2836         }
2837 #endif
2838         return a->mode;
2839 }
2840
2841 const struct attribute_group nvme_ns_id_attr_group = {
2842         .attrs          = nvme_ns_id_attrs,
2843         .is_visible     = nvme_ns_id_attrs_are_visible,
2844 };
2845
2846 const struct attribute_group *nvme_ns_id_attr_groups[] = {
2847         &nvme_ns_id_attr_group,
2848 #ifdef CONFIG_NVM
2849         &nvme_nvm_attr_group,
2850 #endif
2851         NULL,
2852 };
2853
2854 #define nvme_show_str_function(field)                                           \
2855 static ssize_t  field##_show(struct device *dev,                                \
2856                             struct device_attribute *attr, char *buf)           \
2857 {                                                                               \
2858         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);                          \
2859         return sprintf(buf, "%.*s\n",                                           \
2860                 (int)sizeof(ctrl->subsys->field), ctrl->subsys->field);         \
2861 }                                                                               \
2862 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
2863
2864 nvme_show_str_function(model);
2865 nvme_show_str_function(serial);
2866 nvme_show_str_function(firmware_rev);
2867
2868 #define nvme_show_int_function(field)                                           \
2869 static ssize_t  field##_show(struct device *dev,                                \
2870                             struct device_attribute *attr, char *buf)           \
2871 {                                                                               \
2872         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);                          \
2873         return sprintf(buf, "%d\n", ctrl->field);       \
2874 }                                                                               \
2875 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
2876
2877 nvme_show_int_function(cntlid);
2878
2879 static ssize_t nvme_sysfs_delete(struct device *dev,
2880                                 struct device_attribute *attr, const char *buf,
2881                                 size_t count)
2882 {
2883         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2884
2885         /* Can't delete non-created controllers */
2886         if (!ctrl->created)
2887                 return -EBUSY;
2888
2889         if (device_remove_file_self(dev, attr))
2890                 nvme_delete_ctrl_sync(ctrl);
2891         return count;
2892 }
2893 static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);
2894
2895 static ssize_t nvme_sysfs_show_transport(struct device *dev,
2896                                          struct device_attribute *attr,
2897                                          char *buf)
2898 {
2899         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2900
2901         return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
2902 }
2903 static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);
2904
2905 static ssize_t nvme_sysfs_show_state(struct device *dev,
2906                                      struct device_attribute *attr,
2907                                      char *buf)
2908 {
2909         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2910         static const char *const state_name[] = {
2911                 [NVME_CTRL_NEW]         = "new",
2912                 [NVME_CTRL_LIVE]        = "live",
2913                 [NVME_CTRL_ADMIN_ONLY]  = "only-admin",
2914                 [NVME_CTRL_RESETTING]   = "resetting",
2915                 [NVME_CTRL_CONNECTING]  = "connecting",
2916                 [NVME_CTRL_DELETING]    = "deleting",
2917                 [NVME_CTRL_DEAD]        = "dead",
2918         };
2919
2920         if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
2921             state_name[ctrl->state])
2922                 return sprintf(buf, "%s\n", state_name[ctrl->state]);
2923
2924         return sprintf(buf, "unknown state\n");
2925 }
2926
2927 static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);
2928
2929 static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
2930                                          struct device_attribute *attr,
2931                                          char *buf)
2932 {
2933         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2934
2935         return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn);
2936 }
2937 static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
2938
2939 static ssize_t nvme_sysfs_show_address(struct device *dev,
2940                                          struct device_attribute *attr,
2941                                          char *buf)
2942 {
2943         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2944
2945         return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
2946 }
2947 static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
2948
2949 static struct attribute *nvme_dev_attrs[] = {
2950         &dev_attr_reset_controller.attr,
2951         &dev_attr_rescan_controller.attr,
2952         &dev_attr_model.attr,
2953         &dev_attr_serial.attr,
2954         &dev_attr_firmware_rev.attr,
2955         &dev_attr_cntlid.attr,
2956         &dev_attr_delete_controller.attr,
2957         &dev_attr_transport.attr,
2958         &dev_attr_subsysnqn.attr,
2959         &dev_attr_address.attr,
2960         &dev_attr_state.attr,
2961         NULL
2962 };
2963
2964 static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
2965                 struct attribute *a, int n)
2966 {
2967         struct device *dev = container_of(kobj, struct device, kobj);
2968         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2969
2970         if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
2971                 return 0;
2972         if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
2973                 return 0;
2974
2975         return a->mode;
2976 }
2977
2978 static struct attribute_group nvme_dev_attrs_group = {
2979         .attrs          = nvme_dev_attrs,
2980         .is_visible     = nvme_dev_attrs_are_visible,
2981 };
2982
2983 static const struct attribute_group *nvme_dev_attr_groups[] = {
2984         &nvme_dev_attrs_group,
2985         NULL,
2986 };
2987
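/*
 * Find an existing namespace head with the given NSID in the subsystem and
 * return it with an elevated reference, or NULL if none is found.  Heads
 * whose refcount has already dropped to zero are skipped.  The caller must
 * hold subsys->lock.
 */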
2988 static struct nvme_ns_head *__nvme_find_ns_head(struct nvme_subsystem *subsys,
2989                 unsigned nsid)
2990 {
2991         struct nvme_ns_head *h;
2992
2993         lockdep_assert_held(&subsys->lock);
2994
2995         list_for_each_entry(h, &subsys->nsheads, entry) {
2996                 if (h->ns_id == nsid && kref_get_unless_zero(&h->ref))
2997                         return h;
2998         }
2999
3000         return NULL;
3001 }
3002
3003 static int __nvme_check_ids(struct nvme_subsystem *subsys,
3004                 struct nvme_ns_head *new)
3005 {
3006         struct nvme_ns_head *h;
3007
3008         lockdep_assert_held(&subsys->lock);
3009
3010         list_for_each_entry(h, &subsys->nsheads, entry) {
3011                 if (nvme_ns_ids_valid(&new->ids) &&
3012                     !list_empty(&h->list) &&
3013                     nvme_ns_ids_equal(&new->ids, &h->ids))
3014                         return -EINVAL;
3015         }
3016
3017         return 0;
3018 }
3019
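/*
 * Allocate and initialize a new namespace head: reserve an instance number
 * from the subsystem IDA, set up the SRCU used for path lookups, record the
 * namespace identifiers reported by the controller, and reject the namespace
 * if those identifiers collide with another head in the subsystem.  On
 * success the head is linked into subsys->nsheads and holds a subsystem
 * reference; on failure an ERR_PTR() is returned.  Called with subsys->lock
 * held.
 */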
3020 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
3021                 unsigned nsid, struct nvme_id_ns *id)
3022 {
3023         struct nvme_ns_head *head;
3024         int ret = -ENOMEM;
3025
3026         head = kzalloc(sizeof(*head), GFP_KERNEL);
3027         if (!head)
3028                 goto out;
3029         ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL);
3030         if (ret < 0)
3031                 goto out_free_head;
3032         head->instance = ret;
3033         INIT_LIST_HEAD(&head->list);
3034         ret = init_srcu_struct(&head->srcu);
3035         if (ret)
3036                 goto out_ida_remove;
3037         head->subsys = ctrl->subsys;
3038         head->ns_id = nsid;
3039         kref_init(&head->ref);
3040
3041         nvme_report_ns_ids(ctrl, nsid, id, &head->ids);
3042
3043         ret = __nvme_check_ids(ctrl->subsys, head);
3044         if (ret) {
3045                 dev_err(ctrl->device,
3046                         "duplicate IDs for nsid %d\n", nsid);
3047                 goto out_cleanup_srcu;
3048         }
3049
3050         ret = nvme_mpath_alloc_disk(ctrl, head);
3051         if (ret)
3052                 goto out_cleanup_srcu;
3053
3054         list_add_tail(&head->entry, &ctrl->subsys->nsheads);
3055
3056         kref_get(&ctrl->subsys->ref);
3057
3058         return head;
3059 out_cleanup_srcu:
3060         cleanup_srcu_struct(&head->srcu);
3061 out_ida_remove:
3062         ida_simple_remove(&ctrl->subsys->ns_ida, head->instance);
3063 out_free_head:
3064         kfree(head);
3065 out:
3066         return ERR_PTR(ret);
3067 }
3068
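/*
 * Attach a namespace to its head.  Shared namespaces (NMIC bit 0 set) reuse
 * an existing head with the same NSID if one exists, provided the reported
 * identifiers match; otherwise a new head is allocated.  The namespace is
 * then added to the head's sibling list as an additional path.
 */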
3069 static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
3070                 struct nvme_id_ns *id)
3071 {
3072         struct nvme_ctrl *ctrl = ns->ctrl;
3073         bool is_shared = id->nmic & (1 << 0);
3074         struct nvme_ns_head *head = NULL;
3075         int ret = 0;
3076
3077         mutex_lock(&ctrl->subsys->lock);
3078         if (is_shared)
3079                 head = __nvme_find_ns_head(ctrl->subsys, nsid);
3080         if (!head) {
3081                 head = nvme_alloc_ns_head(ctrl, nsid, id);
3082                 if (IS_ERR(head)) {
3083                         ret = PTR_ERR(head);
3084                         goto out_unlock;
3085                 }
3086         } else {
3087                 struct nvme_ns_ids ids;
3088
3089                 nvme_report_ns_ids(ctrl, nsid, id, &ids);
3090                 if (!nvme_ns_ids_equal(&head->ids, &ids)) {
3091                         dev_err(ctrl->device,
3092                                 "IDs don't match for shared namespace %d\n",
3093                                         nsid);
3094                         ret = -EINVAL;
3095                         goto out_unlock;
3096                 }
3097         }
3098
3099         list_add_tail(&ns->siblings, &head->list);
3100         ns->head = head;
3101
3102 out_unlock:
3103         mutex_unlock(&ctrl->subsys->lock);
3104         return ret;
3105 }
3106
3107 static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
3108 {
3109         struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
3110         struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);
3111
3112         return nsa->head->ns_id - nsb->head->ns_id;
3113 }
3114
3115 static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3116 {
3117         struct nvme_ns *ns, *ret = NULL;
3118
3119         down_read(&ctrl->namespaces_rwsem);
3120         list_for_each_entry(ns, &ctrl->namespaces, list) {
3121                 if (ns->head->ns_id == nsid) {
3122                         if (!kref_get_unless_zero(&ns->kref))
3123                                 continue;
3124                         ret = ns;
3125                         break;
3126                 }
3127                 if (ns->head->ns_id > nsid)
3128                         break;
3129         }
3130         up_read(&ctrl->namespaces_rwsem);
3131         return ret;
3132 }
3133
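/*
 * If the controller has streams enabled, read the per-namespace stream
 * parameters and use the stream write size and granularity to seed the
 * queue's io_min and io_opt hints.
 */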
3134 static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns)
3135 {
3136         struct streams_directive_params s;
3137         int ret;
3138
3139         if (!ctrl->nr_streams)
3140                 return 0;
3141
3142         ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id);
3143         if (ret)
3144                 return ret;
3145
3146         ns->sws = le32_to_cpu(s.sws);
3147         ns->sgs = le16_to_cpu(s.sgs);
3148
3149         if (ns->sws) {
3150                 unsigned int bs = 1 << ns->lba_shift;
3151
3152                 blk_queue_io_min(ns->queue, bs * ns->sws);
3153                 if (ns->sgs)
3154                         blk_queue_io_opt(ns->queue, bs * ns->sws * ns->sgs);
3155         }
3156
3157         return 0;
3158 }
3159
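/*
 * Allocate and register a new namespace: create the blk-mq request queue,
 * read Identify Namespace, attach the namespace to its head, set up streams
 * and (optionally) LightNVM, then allocate the gendisk, add the namespace to
 * the controller list and publish the disk.  Namespaces reporting zero
 * capacity are silently ignored.
 */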
3160 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3161 {
3162         struct nvme_ns *ns;
3163         struct gendisk *disk;
3164         struct nvme_id_ns *id;
3165         char disk_name[DISK_NAME_LEN];
3166         int node = dev_to_node(ctrl->dev), flags = GENHD_FL_EXT_DEVT;
3167
3168         ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
3169         if (!ns)
3170                 return;
3171
3172         ns->queue = blk_mq_init_queue(ctrl->tagset);
3173         if (IS_ERR(ns->queue))
3174                 goto out_free_ns;
3175         blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
3176         ns->queue->queuedata = ns;
3177         ns->ctrl = ctrl;
3178
3179         kref_init(&ns->kref);
3180         ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
3181
3182         blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
3183         nvme_set_queue_limits(ctrl, ns->queue);
3184
3185         id = nvme_identify_ns(ctrl, nsid);
3186         if (!id)
3187                 goto out_free_queue;
3188
3189         if (id->ncap == 0)
3190                 goto out_free_id;
3191
3192         if (nvme_init_ns_head(ns, nsid, id))
3193                 goto out_free_id;
3194         nvme_setup_streams_ns(ctrl, ns);
3195         nvme_set_disk_name(disk_name, ns, ctrl, &flags);
3196
3197         if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
3198                 if (nvme_nvm_register(ns, disk_name, node)) {
3199                         dev_warn(ctrl->device, "LightNVM init failure\n");
3200                         goto out_unlink_ns;
3201                 }
3202         }
3203
3204         disk = alloc_disk_node(0, node);
3205         if (!disk)
3206                 goto out_unlink_ns;
3207
3208         disk->fops = &nvme_fops;
3209         disk->private_data = ns;
3210         disk->queue = ns->queue;
3211         disk->flags = flags;
3212         memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
3213         ns->disk = disk;
3214
3215         __nvme_revalidate_disk(disk, id);
3216
3217         down_write(&ctrl->namespaces_rwsem);
3218         list_add_tail(&ns->list, &ctrl->namespaces);
3219         up_write(&ctrl->namespaces_rwsem);
3220
3221         nvme_get_ctrl(ctrl);
3222
3223         device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);
3224
3225         nvme_mpath_add_disk(ns, id);
3226         nvme_fault_inject_init(ns);
3227         kfree(id);
3228
3229         return;
3230  out_unlink_ns:
3231         mutex_lock(&ctrl->subsys->lock);
3232         list_del_rcu(&ns->siblings);
3233         mutex_unlock(&ctrl->subsys->lock);
3234  out_free_id:
3235         kfree(id);
3236  out_free_queue:
3237         blk_cleanup_queue(ns->queue);
3238  out_free_ns:
3239         kfree(ns);
3240 }
3241
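/*
 * Tear down a namespace: the NVME_NS_REMOVING bit guards against concurrent
 * removal, the namespace is unlinked from its head's sibling list, and RCU
 * plus SRCU grace periods ensure no path lookup still sees it before the
 * gendisk and request queue are destroyed and the final reference is dropped.
 */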
3242 static void nvme_ns_remove(struct nvme_ns *ns)
3243 {
3244         if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
3245                 return;
3246
3247         nvme_fault_inject_fini(ns);
3248
3249         mutex_lock(&ns->ctrl->subsys->lock);
3250         list_del_rcu(&ns->siblings);
3251         mutex_unlock(&ns->ctrl->subsys->lock);
3252         synchronize_rcu(); /* guarantee not available in head->list */
3253         nvme_mpath_clear_current_path(ns);
3254         synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */
3255
3256         if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
3257                 del_gendisk(ns->disk);
3258                 blk_cleanup_queue(ns->queue);
3259                 if (blk_get_integrity(ns->disk))
3260                         blk_integrity_unregister(ns->disk);
3261         }
3262
3263         down_write(&ns->ctrl->namespaces_rwsem);
3264         list_del_init(&ns->list);
3265         up_write(&ns->ctrl->namespaces_rwsem);
3266
3267         nvme_mpath_check_last_path(ns);
3268         nvme_put_ns(ns);
3269 }
3270
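/*
 * (Re)validate a single NSID: revalidate the disk of an existing namespace
 * and remove it if revalidation fails, or allocate a new namespace if the
 * NSID is not known yet.
 */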
3271 static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3272 {
3273         struct nvme_ns *ns;
3274
3275         ns = nvme_find_get_ns(ctrl, nsid);
3276         if (ns) {
3277                 if (ns->disk && revalidate_disk(ns->disk))
3278                         nvme_ns_remove(ns);
3279                 nvme_put_ns(ns);
3280         } else
3281                 nvme_alloc_ns(ctrl, nsid);
3282 }
3283
3284 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
3285                                         unsigned nsid)
3286 {
3287         struct nvme_ns *ns, *next;
3288         LIST_HEAD(rm_list);
3289
3290         down_write(&ctrl->namespaces_rwsem);
3291         list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
3292                 if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
3293                         list_move_tail(&ns->list, &rm_list);
3294         }
3295         up_write(&ctrl->namespaces_rwsem);
3296
3297         list_for_each_entry_safe(ns, next, &rm_list, list)
3298                 nvme_ns_remove(ns);
3300 }
3301
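/*
 * Scan using the Identify "active namespace ID list".  Each 4KB list holds up
 * to 1024 NSIDs in ascending order, so larger namespace counts need several
 * iterations.  Every reported NSID is validated, namespaces whose NSIDs fall
 * into the gaps are removed, and a zero NSID terminates the scan early.
 */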
3302 static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
3303 {
3304         struct nvme_ns *ns;
3305         __le32 *ns_list;
3306         unsigned i, j, nsid, prev = 0;
3307         unsigned num_lists = DIV_ROUND_UP_ULL((u64)nn, 1024);
3308         int ret = 0;
3309
3310         ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
3311         if (!ns_list)
3312                 return -ENOMEM;
3313
3314         for (i = 0; i < num_lists; i++) {
3315                 ret = nvme_identify_ns_list(ctrl, prev, ns_list);
3316                 if (ret)
3317                         goto free;
3318
3319                 for (j = 0; j < min(nn, 1024U); j++) {
3320                         nsid = le32_to_cpu(ns_list[j]);
3321                         if (!nsid)
3322                                 goto out;
3323
3324                         nvme_validate_ns(ctrl, nsid);
3325
3326                         while (++prev < nsid) {
3327                                 ns = nvme_find_get_ns(ctrl, prev);
3328                                 if (ns) {
3329                                         nvme_ns_remove(ns);
3330                                         nvme_put_ns(ns);
3331                                 }
3332                         }
3333                 }
3334                 nn -= j;
3335         }
3336  out:
3337         nvme_remove_invalid_namespaces(ctrl, prev);
3338  free:
3339         kfree(ns_list);
3340         return ret;
3341 }
3342
3343 static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
3344 {
3345         unsigned i;
3346
3347         for (i = 1; i <= nn; i++)
3348                 nvme_validate_ns(ctrl, i);
3349
3350         nvme_remove_invalid_namespaces(ctrl, nn);
3351 }
3352
3353 static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
3354 {
3355         size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
3356         __le32 *log;
3357         int error;
3358
3359         log = kzalloc(log_size, GFP_KERNEL);
3360         if (!log)
3361                 return;
3362
3363         /*
3364          * We need to read the log to clear the AEN, but we don't want to rely
3365          * on it for the changed namespace information as userspace could have
3366          * raced with us in reading the log page, which could cause us to miss
3367          * updates.
3368          */
3369         error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, log,
3370                         log_size, 0);
3371         if (error)
3372                 dev_warn(ctrl->device,
3373                         "reading changed ns log failed: %d\n", error);
3374
3375         kfree(log);
3376 }
3377
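/*
 * Namespace scan work: clear the changed-namespace log if the corresponding
 * AEN fired, read Identify Controller for the namespace count, and prefer the
 * namespace list scan, falling back to a sequential NSID walk on controllers
 * with limited CNS support or when the list scan fails.  Finally the
 * controller's namespace list is re-sorted by NSID.
 */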
3378 static void nvme_scan_work(struct work_struct *work)
3379 {
3380         struct nvme_ctrl *ctrl =
3381                 container_of(work, struct nvme_ctrl, scan_work);
3382         struct nvme_id_ctrl *id;
3383         unsigned nn;
3384
3385         if (ctrl->state != NVME_CTRL_LIVE)
3386                 return;
3387
3388         WARN_ON_ONCE(!ctrl->tagset);
3389
3390         if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
3391                 dev_info(ctrl->device, "rescanning namespaces.\n");
3392                 nvme_clear_changed_ns_log(ctrl);
3393         }
3394
3395         if (nvme_identify_ctrl(ctrl, &id))
3396                 return;
3397
3398         mutex_lock(&ctrl->scan_lock);
3399         nn = le32_to_cpu(id->nn);
3400         if (!nvme_ctrl_limited_cns(ctrl)) {
3401                 if (!nvme_scan_ns_list(ctrl, nn))
3402                         goto out_free_id;
3403         }
3404         nvme_scan_ns_sequential(ctrl, nn);
3405 out_free_id:
3406         mutex_unlock(&ctrl->scan_lock);
3407         kfree(id);
3408         down_write(&ctrl->namespaces_rwsem);
3409         list_sort(NULL, &ctrl->namespaces, ns_cmp);
3410         up_write(&ctrl->namespaces_rwsem);
3411 }
3412
3413 /*
3414  * This function iterates the namespace list unlocked to allow recovery from
3415  * controller failure. It is up to the caller to ensure the namespace list is
3416  * not modified by scan work while this function is executing.
3417  */
3418 void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
3419 {
3420         struct nvme_ns *ns, *next;
3421         LIST_HEAD(ns_list);
3422
3423         /* prevent racing with ns scanning */
3424         flush_work(&ctrl->scan_work);
3425
3426         /*
3427          * The dead state indicates the controller was not gracefully
3428          * disconnected. In that case, we won't be able to flush any data while
3429          * removing the namespaces' disks; fail all the queues now to avoid
3430          * potentially having to clean up the failed sync later.
3431          */
3432         if (ctrl->state == NVME_CTRL_DEAD)
3433                 nvme_kill_queues(ctrl);
3434
3435         down_write(&ctrl->namespaces_rwsem);
3436         list_splice_init(&ctrl->namespaces, &ns_list);
3437         up_write(&ctrl->namespaces_rwsem);
3438
3439         list_for_each_entry_safe(ns, next, &ns_list, list)
3440                 nvme_ns_remove(ns);
3441 }
3442 EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
3443
3444 static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
3445 {
3446         char *envp[2] = { NULL, NULL };
3447         u32 aen_result = ctrl->aen_result;
3448
3449         ctrl->aen_result = 0;
3450         if (!aen_result)
3451                 return;
3452
3453         envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
3454         if (!envp[0])
3455                 return;
3456         kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
3457         kfree(envp[0]);
3458 }
3459
3460 static void nvme_async_event_work(struct work_struct *work)
3461 {
3462         struct nvme_ctrl *ctrl =
3463                 container_of(work, struct nvme_ctrl, async_event_work);
3464
3465         nvme_aen_uevent(ctrl);
3466         ctrl->ops->submit_async_event(ctrl);
3467 }
3468
3469 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
3470 {
3472         u32 csts;
3473
3474         if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
3475                 return false;
3476
3477         if (csts == ~0)
3478                 return false;
3479
3480         return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
3481 }
3482
3483 static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
3484 {
3485         struct nvme_fw_slot_info_log *log;
3486
3487         log = kmalloc(sizeof(*log), GFP_KERNEL);
3488         if (!log)
3489                 return;
3490
3491         if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log,
3492                         sizeof(*log), 0))
3493                 dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
3494         kfree(log);
3495 }
3496
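/*
 * Firmware activation work: quiesce I/O and poll CSTS.PP until the controller
 * finishes processing the activation, bounded by MTFA (in 100ms units) when
 * reported, or by the admin timeout otherwise.  If the controller does not
 * become ready in time it is reset; otherwise I/O is restarted and the
 * firmware slot log is read to clear the AER.
 */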
3497 static void nvme_fw_act_work(struct work_struct *work)
3498 {
3499         struct nvme_ctrl *ctrl = container_of(work,
3500                                 struct nvme_ctrl, fw_act_work);
3501         unsigned long fw_act_timeout;
3502
3503         if (ctrl->mtfa)
3504                 fw_act_timeout = jiffies +
3505                                 msecs_to_jiffies(ctrl->mtfa * 100);
3506         else
3507                 fw_act_timeout = jiffies +
3508                                 msecs_to_jiffies(admin_timeout * 1000);
3509
3510         nvme_stop_queues(ctrl);
3511         while (nvme_ctrl_pp_status(ctrl)) {
3512                 if (time_after(jiffies, fw_act_timeout)) {
3513                         dev_warn(ctrl->device,
3514                                 "Fw activation timeout, reset controller\n");
3515                         nvme_reset_ctrl(ctrl);
3516                         break;
3517                 }
3518                 msleep(100);
3519         }
3520
3521         if (ctrl->state != NVME_CTRL_LIVE)
3522                 return;
3523
3524         nvme_start_queues(ctrl);
3525         /* read FW slot information to clear the AER */
3526         nvme_get_fw_slot_info(ctrl);
3527 }
3528
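/*
 * Handle a "notice" asynchronous event.  The notice type lives in bits 15:8
 * of the completion result: a namespace-attribute change queues a rescan, a
 * firmware-activation start queues fw_act_work, and an ANA change queues
 * ana_work when multipathing is built in.
 *
 * As a rough example, a completion result of 0x0002 carries event type 0x2
 * (notice) in bits 2:0 and notice type 0x00 in bits 15:8, i.e. a namespace
 * changed event.
 */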
3529 static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
3530 {
3531         switch ((result & 0xff00) >> 8) {
3532         case NVME_AER_NOTICE_NS_CHANGED:
3533                 set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
3534                 nvme_queue_scan(ctrl);
3535                 break;
3536         case NVME_AER_NOTICE_FW_ACT_STARTING:
3537                 queue_work(nvme_wq, &ctrl->fw_act_work);
3538                 break;
3539 #ifdef CONFIG_NVME_MULTIPATH
3540         case NVME_AER_NOTICE_ANA:
3541                 if (!ctrl->ana_log_buf)
3542                         break;
3543                 queue_work(nvme_wq, &ctrl->ana_work);
3544                 break;
3545 #endif
3546         default:
3547                 dev_warn(ctrl->device, "async event result %08x\n", result);
3548         }
3549 }
3550
3551 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
3552                 volatile union nvme_result *res)
3553 {
3554         u32 result = le32_to_cpu(res->u32);
3555
3556         if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
3557                 return;
3558
3559         switch (result & 0x7) {
3560         case NVME_AER_NOTICE:
3561                 nvme_handle_aen_notice(ctrl, result);
3562                 break;
3563         case NVME_AER_ERROR:
3564         case NVME_AER_SMART:
3565         case NVME_AER_CSS:
3566         case NVME_AER_VS:
3567                 ctrl->aen_result = result;
3568                 break;
3569         default:
3570                 break;
3571         }
3572         queue_work(nvme_wq, &ctrl->async_event_work);
3573 }
3574 EXPORT_SYMBOL_GPL(nvme_complete_async_event);
3575
3576 void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
3577 {
3578         nvme_mpath_stop(ctrl);
3579         nvme_stop_keep_alive(ctrl);
3580         flush_work(&ctrl->async_event_work);
3581         cancel_work_sync(&ctrl->fw_act_work);
3582         if (ctrl->ops->stop_ctrl)
3583                 ctrl->ops->stop_ctrl(ctrl);
3584 }
3585 EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
3586
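/*
 * Bring an initialized controller online: (re)start keep-alive if a KATO is
 * configured and, for controllers with I/O queues, kick off a namespace scan,
 * enable and arm asynchronous event reporting, and unquiesce the I/O queues.
 *
 * A transport driver pairs this with nvme_init_ctrl()/nvme_stop_ctrl()/
 * nvme_uninit_ctrl().  A rough, hypothetical probe sequence (my_ctrl_ops is a
 * placeholder for the transport's nvme_ctrl_ops) might look like:
 *
 *	nvme_init_ctrl(&ctrl->ctrl, dev, &my_ctrl_ops, quirks);
 *	... set up the admin queue, nvme_init_identify(), I/O queues ...
 *	nvme_start_ctrl(&ctrl->ctrl);
 */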
3587 void nvme_start_ctrl(struct nvme_ctrl *ctrl)
3588 {
3589         if (ctrl->kato)
3590                 nvme_start_keep_alive(ctrl);
3591
3592         if (ctrl->queue_count > 1) {
3593                 nvme_queue_scan(ctrl);
3594                 nvme_enable_aen(ctrl);
3595                 queue_work(nvme_wq, &ctrl->async_event_work);
3596                 nvme_start_queues(ctrl);
3597         }
3598         ctrl->created = true;
3599 }
3600 EXPORT_SYMBOL_GPL(nvme_start_ctrl);
3601
3602 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
3603 {
3604         dev_pm_qos_hide_latency_tolerance(ctrl->device);
3605         cdev_device_del(&ctrl->cdev, ctrl->device);
3606 }
3607 EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
3608
3609 static void nvme_free_ctrl(struct device *dev)
3610 {
3611         struct nvme_ctrl *ctrl =
3612                 container_of(dev, struct nvme_ctrl, ctrl_device);
3613         struct nvme_subsystem *subsys = ctrl->subsys;
3614
3615         ida_simple_remove(&nvme_instance_ida, ctrl->instance);
3616         kfree(ctrl->effects);
3617         nvme_mpath_uninit(ctrl);
3618         __free_page(ctrl->discard_page);
3619
3620         if (subsys) {
3621                 mutex_lock(&subsys->lock);
3622                 list_del(&ctrl->subsys_entry);
3623                 mutex_unlock(&subsys->lock);
3624                 sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
3625         }
3626
3627         ctrl->ops->free_ctrl(ctrl);
3628
3629         if (subsys)
3630                 nvme_put_subsystem(subsys);
3631 }
3632
3633 /*
3634  * Initialize an NVMe controller structure.  This needs to be called during
3635  * the earliest initialization so that we have the initialized structures
3636  * around during probing.
3637  */
3638 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
3639                 const struct nvme_ctrl_ops *ops, unsigned long quirks)
3640 {
3641         int ret;
3642
3643         ctrl->state = NVME_CTRL_NEW;
3644         spin_lock_init(&ctrl->lock);
3645         mutex_init(&ctrl->scan_lock);
3646         INIT_LIST_HEAD(&ctrl->namespaces);
3647         init_rwsem(&ctrl->namespaces_rwsem);
3648         ctrl->dev = dev;
3649         ctrl->ops = ops;
3650         ctrl->quirks = quirks;
3651         INIT_WORK(&ctrl->scan_work, nvme_scan_work);
3652         INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
3653         INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
3654         INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
3655
3656         INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
3657         memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
3658         ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
3659
3660         BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
3661                         PAGE_SIZE);
3662         ctrl->discard_page = alloc_page(GFP_KERNEL);
3663         if (!ctrl->discard_page) {
3664                 ret = -ENOMEM;
3665                 goto out;
3666         }
3667
3668         ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
3669         if (ret < 0)
3670                 goto out;
3671         ctrl->instance = ret;
3672
3673         device_initialize(&ctrl->ctrl_device);
3674         ctrl->device = &ctrl->ctrl_device;
3675         ctrl->device->devt = MKDEV(MAJOR(nvme_chr_devt), ctrl->instance);
3676         ctrl->device->class = nvme_class;
3677         ctrl->device->parent = ctrl->dev;
3678         ctrl->device->groups = nvme_dev_attr_groups;
3679         ctrl->device->release = nvme_free_ctrl;
3680         dev_set_drvdata(ctrl->device, ctrl);
3681         ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
3682         if (ret)
3683                 goto out_release_instance;
3684
3685         cdev_init(&ctrl->cdev, &nvme_dev_fops);
3686         ctrl->cdev.owner = ops->module;
3687         ret = cdev_device_add(&ctrl->cdev, ctrl->device);
3688         if (ret)
3689                 goto out_free_name;
3690
3691         /*
3692          * Initialize latency tolerance controls.  The sysfs files won't
3693          * be visible to userspace unless the device actually supports APST.
3694          */
3695         ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
3696         dev_pm_qos_update_user_latency_tolerance(ctrl->device,
3697                 min(default_ps_max_latency_us, (unsigned long)S32_MAX));
3698
3699         return 0;
3700 out_free_name:
3701         kfree_const(ctrl->device->kobj.name);
3702 out_release_instance:
3703         ida_simple_remove(&nvme_instance_ida, ctrl->instance);
3704 out:
3705         if (ctrl->discard_page)
3706                 __free_page(ctrl->discard_page);
3707         return ret;
3708 }
3709 EXPORT_SYMBOL_GPL(nvme_init_ctrl);
3710
3711 /**
3712  * nvme_kill_queues(): Ends all namespace queues
3713  * @ctrl: the dead controller that needs to end
3714  *
3715  * Call this function when the driver determines it is unable to get the
3716  * controller in a state capable of servicing IO.
3717  */
3718 void nvme_kill_queues(struct nvme_ctrl *ctrl)
3719 {
3720         struct nvme_ns *ns;
3721
3722         down_read(&ctrl->namespaces_rwsem);
3723
3724         /* Forcibly unquiesce queues to avoid blocking dispatch */
3725         if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
3726                 blk_mq_unquiesce_queue(ctrl->admin_q);
3727
3728         list_for_each_entry(ns, &ctrl->namespaces, list)
3729                 nvme_set_queue_dying(ns);
3730
3731         up_read(&ctrl->namespaces_rwsem);
3732 }
3733 EXPORT_SYMBOL_GPL(nvme_kill_queues);
3734
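/*
 * Queue freeze helpers.  nvme_start_freeze() starts a freeze on every
 * namespace queue, nvme_wait_freeze()/nvme_wait_freeze_timeout() wait for
 * outstanding requests to drain, and nvme_unfreeze() lifts the freeze again.
 * Transports use these around controller resets; a rough sketch of such a
 * sequence (details vary per transport) is:
 *
 *	nvme_start_freeze(ctrl);
 *	nvme_stop_queues(ctrl);
 *	... tear down and re-establish the I/O queues ...
 *	nvme_start_queues(ctrl);
 *	nvme_wait_freeze(ctrl);
 *	nvme_unfreeze(ctrl);
 */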
3735 void nvme_unfreeze(struct nvme_ctrl *ctrl)
3736 {
3737         struct nvme_ns *ns;
3738
3739         down_read(&ctrl->namespaces_rwsem);
3740         list_for_each_entry(ns, &ctrl->namespaces, list)
3741                 blk_mq_unfreeze_queue(ns->queue);
3742         up_read(&ctrl->namespaces_rwsem);
3743 }
3744 EXPORT_SYMBOL_GPL(nvme_unfreeze);
3745
3746 void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
3747 {
3748         struct nvme_ns *ns;
3749
3750         down_read(&ctrl->namespaces_rwsem);
3751         list_for_each_entry(ns, &ctrl->namespaces, list) {
3752                 timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
3753                 if (timeout <= 0)
3754                         break;
3755         }
3756         up_read(&ctrl->namespaces_rwsem);
3757 }
3758 EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
3759
3760 void nvme_wait_freeze(struct nvme_ctrl *ctrl)
3761 {
3762         struct nvme_ns *ns;
3763
3764         down_read(&ctrl->namespaces_rwsem);
3765         list_for_each_entry(ns, &ctrl->namespaces, list)
3766                 blk_mq_freeze_queue_wait(ns->queue);
3767         up_read(&ctrl->namespaces_rwsem);
3768 }
3769 EXPORT_SYMBOL_GPL(nvme_wait_freeze);
3770
3771 void nvme_start_freeze(struct nvme_ctrl *ctrl)
3772 {
3773         struct nvme_ns *ns;
3774
3775         down_read(&ctrl->namespaces_rwsem);
3776         list_for_each_entry(ns, &ctrl->namespaces, list)
3777                 blk_freeze_queue_start(ns->queue);
3778         up_read(&ctrl->namespaces_rwsem);
3779 }
3780 EXPORT_SYMBOL_GPL(nvme_start_freeze);
3781
3782 void nvme_stop_queues(struct nvme_ctrl *ctrl)
3783 {
3784         struct nvme_ns *ns;
3785
3786         down_read(&ctrl->namespaces_rwsem);
3787         list_for_each_entry(ns, &ctrl->namespaces, list)
3788                 blk_mq_quiesce_queue(ns->queue);
3789         up_read(&ctrl->namespaces_rwsem);
3790 }
3791 EXPORT_SYMBOL_GPL(nvme_stop_queues);
3792
3793 void nvme_start_queues(struct nvme_ctrl *ctrl)
3794 {
3795         struct nvme_ns *ns;
3796
3797         down_read(&ctrl->namespaces_rwsem);
3798         list_for_each_entry(ns, &ctrl->namespaces, list)
3799                 blk_mq_unquiesce_queue(ns->queue);
3800         up_read(&ctrl->namespaces_rwsem);
3801 }
3802 EXPORT_SYMBOL_GPL(nvme_start_queues);
3803
3804 int __init nvme_core_init(void)
3805 {
3806         int result = -ENOMEM;
3807
3808         nvme_wq = alloc_workqueue("nvme-wq",
3809                         WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
3810         if (!nvme_wq)
3811                 goto out;
3812
3813         nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
3814                         WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
3815         if (!nvme_reset_wq)
3816                 goto destroy_wq;
3817
3818         nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
3819                         WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
3820         if (!nvme_delete_wq)
3821                 goto destroy_reset_wq;
3822
3823         result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme");
3824         if (result < 0)
3825                 goto destroy_delete_wq;
3826
3827         nvme_class = class_create(THIS_MODULE, "nvme");
3828         if (IS_ERR(nvme_class)) {
3829                 result = PTR_ERR(nvme_class);
3830                 goto unregister_chrdev;
3831         }
3832
3833         nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem");
3834         if (IS_ERR(nvme_subsys_class)) {
3835                 result = PTR_ERR(nvme_subsys_class);
3836                 goto destroy_class;
3837         }
3838         return 0;
3839
3840 destroy_class:
3841         class_destroy(nvme_class);
3842 unregister_chrdev:
3843         unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
3844 destroy_delete_wq:
3845         destroy_workqueue(nvme_delete_wq);
3846 destroy_reset_wq:
3847         destroy_workqueue(nvme_reset_wq);
3848 destroy_wq:
3849         destroy_workqueue(nvme_wq);
3850 out:
3851         return result;
3852 }
3853
3854 void nvme_core_exit(void)
3855 {
3856         ida_destroy(&nvme_subsystems_ida);
3857         class_destroy(nvme_subsys_class);
3858         class_destroy(nvme_class);
3859         unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
3860         destroy_workqueue(nvme_delete_wq);
3861         destroy_workqueue(nvme_reset_wq);
3862         destroy_workqueue(nvme_wq);
3863 }
3864
3865 MODULE_LICENSE("GPL");
3866 MODULE_VERSION("1.0");
3867 module_init(nvme_core_init);
3868 module_exit(nvme_core_exit);