GNU Linux-libre 6.8.7-gnu
drivers/nvme/target/io-cmd-file.c
// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Over Fabrics Target File I/O commands implementation.
 * Copyright (c) 2017-2018 Western Digital Corporation or its
 * affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/uio.h>
#include <linux/falloc.h>
#include <linux/file.h>
#include <linux/fs.h>
#include "nvmet.h"

#define NVMET_MIN_MPOOL_OBJ             16

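/* Refresh the cached namespace size from the backing file's inode. */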
void nvmet_file_ns_revalidate(struct nvmet_ns *ns)
{
        ns->size = i_size_read(ns->file->f_mapping->host);
}

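/*
 * Tear down a file-backed namespace: drain any pending buffered I/O
 * work, release the bvec mempool, and drop the file reference.
 */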
void nvmet_file_ns_disable(struct nvmet_ns *ns)
{
        if (ns->file) {
                if (ns->buffered_io)
                        flush_workqueue(buffered_io_wq);
                mempool_destroy(ns->bvec_pool);
                ns->bvec_pool = NULL;
                fput(ns->file);
                ns->file = NULL;
        }
}

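/*
 * Open the backing file (O_DIRECT unless buffered_io is configured),
 * derive the namespace size and block size from the inode, and create
 * a bvec mempool used as a last-resort allocation under memory pressure.
 */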
int nvmet_file_ns_enable(struct nvmet_ns *ns)
{
        int flags = O_RDWR | O_LARGEFILE;
        int ret = 0;

        if (!ns->buffered_io)
                flags |= O_DIRECT;

        ns->file = filp_open(ns->device_path, flags, 0);
        if (IS_ERR(ns->file)) {
                ret = PTR_ERR(ns->file);
                pr_err("failed to open file %s: (%d)\n",
                        ns->device_path, ret);
                ns->file = NULL;
                return ret;
        }

        nvmet_file_ns_revalidate(ns);

        /*
         * i_blkbits can be greater than the universally accepted upper bound,
         * so make sure we export a sane namespace lba_shift.
         */
        ns->blksize_shift = min_t(u8,
                        file_inode(ns->file)->i_blkbits, 12);

        ns->bvec_pool = mempool_create(NVMET_MIN_MPOOL_OBJ, mempool_alloc_slab,
                        mempool_free_slab, nvmet_bvec_cache);

        if (!ns->bvec_pool) {
                ret = -ENOMEM;
                goto err;
        }

        return ret;
err:
        fput(ns->file);
        ns->file = NULL;
        ns->size = 0;
        ns->blksize_shift = 0;
        return ret;
}

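/*
 * Build an iov_iter over the request's bvec array and call the backing
 * file's ->read_iter or ->write_iter directly.  A write carrying the
 * FUA bit is issued with IOCB_DSYNC so it reaches stable storage.
 */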
static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
                unsigned long nr_segs, size_t count, int ki_flags)
{
        struct kiocb *iocb = &req->f.iocb;
        ssize_t (*call_iter)(struct kiocb *iocb, struct iov_iter *iter);
        struct iov_iter iter;
        int rw;

        if (req->cmd->rw.opcode == nvme_cmd_write) {
                if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
                        ki_flags |= IOCB_DSYNC;
                call_iter = req->ns->file->f_op->write_iter;
                rw = ITER_SOURCE;
        } else {
                call_iter = req->ns->file->f_op->read_iter;
                rw = ITER_DEST;
        }

        iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);

        iocb->ki_pos = pos;
        iocb->ki_filp = req->ns->file;
        iocb->ki_flags = ki_flags | iocb->ki_filp->f_iocb_flags;

        return call_iter(iocb, &iter);
}

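/*
 * Completion handler, also installed as the AIO ->ki_complete callback.
 * Frees the bvec array (kmalloc'ed or mempool-backed), maps a short
 * transfer onto an NVMe status code, and completes the request.
 */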
static void nvmet_file_io_done(struct kiocb *iocb, long ret)
{
        struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
        u16 status = NVME_SC_SUCCESS;

        if (req->f.bvec != req->inline_bvec) {
                if (likely(req->f.mpool_alloc == false))
                        kfree(req->f.bvec);
                else
                        mempool_free(req->f.bvec, req->ns->bvec_pool);
        }

        if (unlikely(ret != req->transfer_len))
                status = errno_to_nvme_status(req, ret);
        nvmet_req_complete(req, status);
}

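/*
 * Main read/write path.  Translates the scatterlist into bvecs and
 * submits them: either as a single asynchronous iocb or, when the bvec
 * array came from the bounded mempool, as synchronous chunks of at most
 * NVMET_MAX_MPOOL_BVEC segments.  Returns false when the caller should
 * retry without IOCB_NOWAIT, true when the request has been handled.
 */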
static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
{
        ssize_t nr_bvec = req->sg_cnt;
        unsigned long bv_cnt = 0;
        bool is_sync = false;
        size_t len = 0, total_len = 0;
        ssize_t ret = 0;
        loff_t pos;
        int i;
        struct scatterlist *sg;

        if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
                is_sync = true;

        pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
        if (unlikely(pos + req->transfer_len > req->ns->size)) {
                nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
                return true;
        }

        memset(&req->f.iocb, 0, sizeof(struct kiocb));
        for_each_sg(req->sg, sg, req->sg_cnt, i) {
                bvec_set_page(&req->f.bvec[bv_cnt], sg_page(sg), sg->length,
                              sg->offset);
                len += req->f.bvec[bv_cnt].bv_len;
                total_len += req->f.bvec[bv_cnt].bv_len;
                bv_cnt++;

                WARN_ON_ONCE((nr_bvec - 1) < 0);

                if (unlikely(is_sync) &&
                    (nr_bvec - 1 == 0 || bv_cnt == NVMET_MAX_MPOOL_BVEC)) {
                        ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len, 0);
                        if (ret < 0)
                                goto complete;

                        pos += len;
                        bv_cnt = 0;
                        len = 0;
                }
                nr_bvec--;
        }

        if (WARN_ON_ONCE(total_len != req->transfer_len)) {
                ret = -EIO;
                goto complete;
        }

        if (unlikely(is_sync)) {
                ret = total_len;
                goto complete;
        }

        /*
         * A NULL ki_complete asks for synchronous execution, which we want
         * for the IOCB_NOWAIT case.
         */
        if (!(ki_flags & IOCB_NOWAIT))
                req->f.iocb.ki_complete = nvmet_file_io_done;

        ret = nvmet_file_submit_bvec(req, pos, bv_cnt, total_len, ki_flags);

        switch (ret) {
        case -EIOCBQUEUED:
                return true;
        case -EAGAIN:
                if (WARN_ON_ONCE(!(ki_flags & IOCB_NOWAIT)))
                        goto complete;
                return false;
        case -EOPNOTSUPP:
                /*
                 * For file systems returning error -EOPNOTSUPP, handle
                 * IOCB_NOWAIT error case separately and retry without
                 * IOCB_NOWAIT.
                 */
                if ((ki_flags & IOCB_NOWAIT))
                        return false;
                break;
        }

complete:
        nvmet_file_io_done(&req->f.iocb, ret);
        return true;
}

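/*
 * Buffered I/O runs from the buffered_io_wq workqueue so the submission
 * context is allowed to sleep on page cache I/O.
 */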
static void nvmet_file_buffered_io_work(struct work_struct *w)
{
        struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

        nvmet_file_execute_io(req, 0);
}

static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
{
        INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
        queue_work(buffered_io_wq, &req->f.work);
}

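/*
 * Entry point for read/write commands.  Picks the bvec allocation
 * strategy (inline array, kmalloc, or mempool fallback) and then either
 * executes the I/O in place or, for buffered namespaces that cannot
 * complete without blocking, defers it to the workqueue.
 */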
static void nvmet_file_execute_rw(struct nvmet_req *req)
{
        ssize_t nr_bvec = req->sg_cnt;

        if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
                return;

        if (!req->sg_cnt || !nr_bvec) {
                nvmet_req_complete(req, 0);
                return;
        }

        if (nr_bvec > NVMET_MAX_INLINE_BIOVEC)
                req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
                                GFP_KERNEL);
        else
                req->f.bvec = req->inline_bvec;

        if (unlikely(!req->f.bvec)) {
                /* fallback under memory pressure */
                req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);
                req->f.mpool_alloc = true;
        } else
                req->f.mpool_alloc = false;

        if (req->ns->buffered_io) {
                if (likely(!req->f.mpool_alloc) &&
                    (req->ns->file->f_mode & FMODE_NOWAIT) &&
                    nvmet_file_execute_io(req, IOCB_NOWAIT))
                        return;
                nvmet_file_submit_buffered_io(req);
        } else
                nvmet_file_execute_io(req, 0);
}

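/* Sync the backing file to stable storage via vfs_fsync(). */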
u16 nvmet_file_flush(struct nvmet_req *req)
{
        return errno_to_nvme_status(req, vfs_fsync(req->ns->file, 1));
}

static void nvmet_file_flush_work(struct work_struct *w)
{
        struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

        nvmet_req_complete(req, nvmet_file_flush(req));
}

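/* Flush may block, so it is executed from the nvmet_wq workqueue. */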
static void nvmet_file_execute_flush(struct nvmet_req *req)
{
        if (!nvmet_check_transfer_len(req, 0))
                return;
        INIT_WORK(&req->f.work, nvmet_file_flush_work);
        queue_work(nvmet_wq, &req->f.work);
}

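/*
 * Handle a Dataset Management discard: punch a hole for each range in
 * the DSM payload, bounds-checking every range against the namespace
 * size.  -EOPNOTSUPP from the filesystem is not treated as an error.
 */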
static void nvmet_file_execute_discard(struct nvmet_req *req)
{
        int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
        struct nvme_dsm_range range;
        loff_t offset, len;
        u16 status = 0;
        int ret;
        int i;

        for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
                status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
                                        sizeof(range));
                if (status)
                        break;

                offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
                len = le32_to_cpu(range.nlb);
                len <<= req->ns->blksize_shift;
                if (offset + len > req->ns->size) {
                        req->error_slba = le64_to_cpu(range.slba);
                        status = errno_to_nvme_status(req, -ENOSPC);
                        break;
                }

                ret = vfs_fallocate(req->ns->file, mode, offset, len);
                if (ret && ret != -EOPNOTSUPP) {
                        req->error_slba = le64_to_cpu(range.slba);
                        status = errno_to_nvme_status(req, ret);
                        break;
                }
        }

        nvmet_req_complete(req, status);
}

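/*
 * Only the Deallocate (AD) DSM attribute is implemented; the integrity
 * read/write hints complete successfully without any action.
 */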
static void nvmet_file_dsm_work(struct work_struct *w)
{
        struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

        switch (le32_to_cpu(req->cmd->dsm.attributes)) {
        case NVME_DSMGMT_AD:
                nvmet_file_execute_discard(req);
                return;
        case NVME_DSMGMT_IDR:
        case NVME_DSMGMT_IDW:
        default:
                /* Not supported yet */
                nvmet_req_complete(req, 0);
                return;
        }
}

static void nvmet_file_execute_dsm(struct nvmet_req *req)
{
        if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
                return;
        INIT_WORK(&req->f.work, nvmet_file_dsm_work);
        queue_work(nvmet_wq, &req->f.work);
}

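/*
 * Write Zeroes is implemented with fallocate(FALLOC_FL_ZERO_RANGE),
 * after checking that the zeroed range fits within the namespace.
 */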
static void nvmet_file_write_zeroes_work(struct work_struct *w)
{
        struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
        struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
        int mode = FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE;
        loff_t offset;
        loff_t len;
        int ret;

        offset = le64_to_cpu(write_zeroes->slba) << req->ns->blksize_shift;
        len = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
                        req->ns->blksize_shift);

        if (unlikely(offset + len > req->ns->size)) {
                nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
                return;
        }

        ret = vfs_fallocate(req->ns->file, mode, offset, len);
        nvmet_req_complete(req, ret < 0 ? errno_to_nvme_status(req, ret) : 0);
}

static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
{
        if (!nvmet_check_transfer_len(req, 0))
                return;
        INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
        queue_work(nvmet_wq, &req->f.work);
}

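/*
 * Dispatch an I/O command to its file-backed handler, or report an
 * invalid opcode for anything unsupported.
 */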
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
{
        switch (req->cmd->common.opcode) {
        case nvme_cmd_read:
        case nvme_cmd_write:
                req->execute = nvmet_file_execute_rw;
                return 0;
        case nvme_cmd_flush:
                req->execute = nvmet_file_execute_flush;
                return 0;
        case nvme_cmd_dsm:
                req->execute = nvmet_file_execute_dsm;
                return 0;
        case nvme_cmd_write_zeroes:
                req->execute = nvmet_file_execute_write_zeroes;
                return 0;
        default:
                return nvmet_report_invalid_opcode(req);
        }
}