GNU Linux-libre 4.9.315-gnu1
drivers/nvme/target/io-cmd.c
/*
 * NVMe I/O command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/module.h>
#include "nvmet.h"

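/*
 * Shared completion path for all bio-backed commands: map any block layer
 * error to a generic NVMe status and complete the target request.  The bio
 * embedded in the request (req->inline_bio) was never allocated separately,
 * so only bios obtained from the allocator are put here.
 */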
static void nvmet_bio_done(struct bio *bio)
{
	struct nvmet_req *req = bio->bi_private;

	nvmet_req_complete(req,
		bio->bi_error ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);

	if (bio != &req->inline_bio)
		bio_put(bio);
}

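/*
 * NLB in the NVMe read/write command is a 0's based count of logical
 * blocks, so add one before scaling by the namespace block size to get
 * the transfer length in bytes.
 */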
static inline u32 nvmet_rw_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}

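/*
 * Set up the bio embedded in the request so small transfers need no
 * separate bio allocation; its biovec array also lives in the request.
 */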
static void nvmet_inline_bio_init(struct nvmet_req *req)
{
	struct bio *bio = &req->inline_bio;

	bio_init(bio);
	bio->bi_max_vecs = NVMET_MAX_INLINE_BIOVEC;
	bio->bi_io_vec = req->inline_bvec;
}

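/*
 * Read/write handler: translate the command's scatterlist into one or
 * more bios.  Pages are added to the current bio until it is full, then
 * a new bio is allocated, chained to the previous one, and the previous
 * one submitted, so completion is only signalled once the whole chain
 * has finished.
 */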
static void nvmet_execute_rw(struct nvmet_req *req)
{
	int sg_cnt = req->sg_cnt;
	struct scatterlist *sg;
	struct bio *bio;
	sector_t sector;
	blk_qc_t cookie;
	int op, op_flags = 0, i;

	if (!req->sg_cnt) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (req->cmd->rw.opcode == nvme_cmd_write) {
		op = REQ_OP_WRITE;
		op_flags = WRITE_ODIRECT;
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			op_flags |= REQ_FUA;
	} else {
		op = REQ_OP_READ;
	}

	/* convert the starting LBA to 512-byte sectors */
	sector = le64_to_cpu(req->cmd->rw.slba);
	sector <<= (req->ns->blksize_shift - 9);

	nvmet_inline_bio_init(req);
	bio = &req->inline_bio;
	bio->bi_bdev = req->ns->bdev;
	bio->bi_iter.bi_sector = sector;
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;
	bio_set_op_attrs(bio, op, op_flags);

	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			struct bio *prev = bio;

			/* bio is full: allocate a new one, chain and submit */
			bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
			bio->bi_bdev = req->ns->bdev;
			bio->bi_iter.bi_sector = sector;
			bio_set_op_attrs(bio, op, op_flags);

			bio_chain(bio, prev);
			cookie = submit_bio(prev);
		}

		sector += sg->length >> 9;
		sg_cnt--;
	}

	cookie = submit_bio(bio);

	blk_poll(bdev_get_queue(req->ns->bdev), cookie);
}

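/*
 * Flush is implemented as an empty write bio carrying a preflush flag;
 * the block layer turns it into a cache flush on the backing device.
 */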
static void nvmet_execute_flush(struct nvmet_req *req)
{
	struct bio *bio;

	nvmet_inline_bio_init(req);
	bio = &req->inline_bio;

	bio->bi_bdev = req->ns->bdev;
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;
	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);

	submit_bio(bio);
}

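/*
 * Convert one DSM range from logical blocks to 512-byte sectors and queue
 * it as a discard; __blkdev_issue_discard() accumulates the work in *bio
 * so multiple ranges can share a single submission chain.
 */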
static u16 nvmet_discard_range(struct nvmet_ns *ns,
		struct nvme_dsm_range *range, struct bio **bio)
{
	if (__blkdev_issue_discard(ns->bdev,
			le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
			le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
			GFP_KERNEL, 0, bio))
		return NVME_SC_INTERNAL | NVME_SC_DNR;
	return 0;
}

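/*
 * Walk the array of DSM ranges supplied by the host, copying each one out
 * of the request SGL.  NR is 0's based, hence the <= loop bound.  Any bio
 * built up by the ranges is failed with an error if a range failed,
 * otherwise submitted; with no bio the request completes directly.
 */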
static void nvmet_execute_discard(struct nvmet_req *req)
{
	struct nvme_dsm_range range;
	struct bio *bio = NULL;
	int i;
	u16 status;

	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
				sizeof(range));
		if (status)
			break;

		status = nvmet_discard_range(req->ns, &range, &bio);
		if (status)
			break;
	}

	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		if (status) {
			bio->bi_error = -EIO;
			bio_endio(bio);
		} else {
			submit_bio(bio);
		}
	} else {
		nvmet_req_complete(req, status);
	}
}

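/*
 * Only the deallocate attribute does real work; the integral read/write
 * dataset hints are advisory, so they complete successfully as no-ops.
 */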
static void nvmet_execute_dsm(struct nvmet_req *req)
{
	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}

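/*
 * Called for each I/O command: validate controller state, look up the
 * namespace, then set the execute handler and expected data length for
 * the opcode.  Returns 0 or an NVMe status code with DNR set.
 */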
int nvmet_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
		pr_err("nvmet: got io cmd %d while CC.EN == 0\n",
				cmd->common.opcode);
		req->ns = NULL;
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("nvmet: got io cmd %d while CSTS.RDY == 0\n",
				cmd->common.opcode);
		req->ns = NULL;
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}

	req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
	if (!req->ns)
		return NVME_SC_INVALID_NS | NVME_SC_DNR;

	switch (cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->execute = nvmet_execute_rw;
		req->data_len = nvmet_rw_len(req);
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_execute_flush;
		req->data_len = 0;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_execute_dsm;
		/* byte swap the wire value before doing arithmetic on it */
		req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
			sizeof(struct nvme_dsm_range);
		return 0;
	default:
		pr_err("nvmet: unhandled cmd %d\n", cmd->common.opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}