GNU Linux-libre 5.10.217-gnu1
drivers/nvme/target/io-cmd-file.c
// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Over Fabrics Target File I/O commands implementation.
 * Copyright (c) 2017-2018 Western Digital Corporation or its
 * affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/uio.h>
#include <linux/falloc.h>
#include <linux/file.h>
#include <linux/fs.h>
#include "nvmet.h"

#define NVMET_MAX_MPOOL_BVEC		16
#define NVMET_MIN_MPOOL_OBJ		16

int nvmet_file_ns_revalidate(struct nvmet_ns *ns)
{
	struct kstat stat;
	int ret;

	ret = vfs_getattr(&ns->file->f_path, &stat, STATX_SIZE,
			  AT_STATX_FORCE_SYNC);
	if (!ret)
		ns->size = stat.size;
	return ret;
}
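
/*
 * Usage sketch (illustrative, not code from this file): a control-plane
 * path that resizes the backing file could call the helper above to
 * pick up the new size; the surrounding context is assumed.
 *
 *	if (nvmet_file_ns_revalidate(ns))
 *		pr_err("failed to revalidate %s\n", ns->device_path);
 *	else
 *		pr_info("ns size: %llu bytes\n",
 *			(unsigned long long)ns->size);
 *
 * AT_STATX_FORCE_SYNC above forces network filesystems to sync
 * attributes with the server, so the reported size is not stale.
 */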

void nvmet_file_ns_disable(struct nvmet_ns *ns)
{
	if (ns->file) {
		if (ns->buffered_io)
			flush_workqueue(buffered_io_wq);
		mempool_destroy(ns->bvec_pool);
		ns->bvec_pool = NULL;
		kmem_cache_destroy(ns->bvec_cache);
		ns->bvec_cache = NULL;
		fput(ns->file);
		ns->file = NULL;
	}
}

int nvmet_file_ns_enable(struct nvmet_ns *ns)
{
	int flags = O_RDWR | O_LARGEFILE;
	int ret;

	if (!ns->buffered_io)
		flags |= O_DIRECT;

	ns->file = filp_open(ns->device_path, flags, 0);
	if (IS_ERR(ns->file)) {
		ret = PTR_ERR(ns->file);
		pr_err("failed to open file %s: (%d)\n",
			ns->device_path, ret);
		ns->file = NULL;
		return ret;
	}

	ret = nvmet_file_ns_revalidate(ns);
	if (ret)
		goto err;

	/*
	 * i_blkbits can be greater than the universally accepted upper
	 * bound of 12 (4K blocks), so clamp it to make sure we export a
	 * sane namespace lba_shift.
	 */
	ns->blksize_shift = min_t(u8,
			file_inode(ns->file)->i_blkbits, 12);

	ns->bvec_cache = kmem_cache_create("nvmet-bvec",
			NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (!ns->bvec_cache) {
		ret = -ENOMEM;
		goto err;
	}

	ns->bvec_pool = mempool_create(NVMET_MIN_MPOOL_OBJ, mempool_alloc_slab,
			mempool_free_slab, ns->bvec_cache);

	if (!ns->bvec_pool) {
		ret = -ENOMEM;
		goto err;
	}

	return ret;
err:
	ns->size = 0;
	ns->blksize_shift = 0;
	nvmet_file_ns_disable(ns);
	return ret;
}
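
/*
 * Worked example of the lba_shift clamp in nvmet_file_ns_enable()
 * (numbers are illustrative): a filesystem using 64K blocks reports
 * i_blkbits = 16, which exceeds the largest LBA size exported here.
 *
 *	blksize_shift = min_t(u8, 16, 12);	// = 12 -> 4096-byte LBAs
 *	pos = slba << blksize_shift;		// LBA 8 -> byte 32768
 */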

static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg)
{
	bv->bv_page = sg_page(sg);
	bv->bv_offset = sg->offset;
	bv->bv_len = sg->length;
}

static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
		unsigned long nr_segs, size_t count, int ki_flags)
{
	struct kiocb *iocb = &req->f.iocb;
	ssize_t (*call_iter)(struct kiocb *iocb, struct iov_iter *iter);
	struct iov_iter iter;
	int rw;

	if (req->cmd->rw.opcode == nvme_cmd_write) {
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			ki_flags |= IOCB_DSYNC;
		call_iter = req->ns->file->f_op->write_iter;
		rw = WRITE;
	} else {
		call_iter = req->ns->file->f_op->read_iter;
		rw = READ;
	}

	iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);

	iocb->ki_pos = pos;
	iocb->ki_filp = req->ns->file;
	iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);

	return call_iter(iocb, &iter);
}
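
/*
 * Minimal sketch (an assumption, not code from this driver) of the
 * iov_iter_bvec() pattern used by nvmet_file_submit_bvec(): wrap a
 * page in a bio_vec, point an iov_iter at it, and hand it to the
 * file's ->read_iter(). Error handling is elided.
 *
 *	struct bio_vec bv = { .bv_page = page, .bv_offset = 0,
 *			      .bv_len = PAGE_SIZE };
 *	struct iov_iter iter;
 *	struct kiocb iocb;
 *	ssize_t ret;
 *
 *	init_sync_kiocb(&iocb, file);
 *	iocb.ki_pos = 0;
 *	iov_iter_bvec(&iter, READ, &bv, 1, PAGE_SIZE);
 *	ret = file->f_op->read_iter(&iocb, &iter);
 */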

static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
{
	struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
	u16 status = NVME_SC_SUCCESS;

	if (req->f.bvec != req->inline_bvec) {
		if (likely(!req->f.mpool_alloc))
			kfree(req->f.bvec);
		else
			mempool_free(req->f.bvec, req->ns->bvec_pool);
	}

	if (unlikely(ret != req->transfer_len))
		status = errno_to_nvme_status(req, ret);
	nvmet_req_complete(req, status);
}

static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
{
	ssize_t nr_bvec = req->sg_cnt;
	unsigned long bv_cnt = 0;
	bool is_sync = false;
	size_t len = 0, total_len = 0;
	ssize_t ret = 0;
	loff_t pos;
	int i;
	struct scatterlist *sg;

	if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
		is_sync = true;

	pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
	if (unlikely(pos + req->transfer_len > req->ns->size)) {
		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
		return true;
	}

	memset(&req->f.iocb, 0, sizeof(struct kiocb));
	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
		len += req->f.bvec[bv_cnt].bv_len;
		total_len += req->f.bvec[bv_cnt].bv_len;
		bv_cnt++;

		WARN_ON_ONCE((nr_bvec - 1) < 0);

		if (unlikely(is_sync) &&
		    (nr_bvec - 1 == 0 || bv_cnt == NVMET_MAX_MPOOL_BVEC)) {
			ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len, 0);
			if (ret < 0)
				goto complete;

			pos += len;
			bv_cnt = 0;
			len = 0;
		}
		nr_bvec--;
	}

	if (WARN_ON_ONCE(total_len != req->transfer_len)) {
		ret = -EIO;
		goto complete;
	}

	if (unlikely(is_sync)) {
		ret = total_len;
		goto complete;
	}

	/*
	 * A NULL ki_complete asks for synchronous execution, which is what
	 * we want for the IOCB_NOWAIT case.
	 */
	if (!(ki_flags & IOCB_NOWAIT))
		req->f.iocb.ki_complete = nvmet_file_io_done;

	ret = nvmet_file_submit_bvec(req, pos, bv_cnt, total_len, ki_flags);

	switch (ret) {
	case -EIOCBQUEUED:
		return true;
	case -EAGAIN:
		if (WARN_ON_ONCE(!(ki_flags & IOCB_NOWAIT)))
			goto complete;
		return false;
	case -EOPNOTSUPP:
		/*
		 * For file systems that return -EOPNOTSUPP, handle the
		 * IOCB_NOWAIT case separately and retry the I/O without
		 * IOCB_NOWAIT.
		 */
		if ((ki_flags & IOCB_NOWAIT))
			return false;
		break;
	}

complete:
	nvmet_file_io_done(&req->f.iocb, ret, 0);
	return true;
}
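
/*
 * Worked example of the synchronous chunking above (numbers are
 * illustrative): a mempool-backed request with req->sg_cnt = 40 and
 * NVMET_MAX_MPOOL_BVEC = 16 is submitted in three synchronous calls
 * to nvmet_file_submit_bvec():
 *
 *	submit #1: bv_cnt = 16	(the bvec array is only 16 entries deep)
 *	submit #2: bv_cnt = 16
 *	submit #3: bv_cnt = 8	(nr_bvec reached 1, final partial chunk)
 *
 * pos advances by the byte length of each chunk, so the three calls
 * cover the full transfer_len contiguously.
 */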

static void nvmet_file_buffered_io_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	nvmet_file_execute_io(req, 0);
}

static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
	queue_work(buffered_io_wq, &req->f.work);
}

static void nvmet_file_execute_rw(struct nvmet_req *req)
{
	ssize_t nr_bvec = req->sg_cnt;

	if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
		return;

	if (!req->sg_cnt || !nr_bvec) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (nr_bvec > NVMET_MAX_INLINE_BIOVEC)
		req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
				GFP_KERNEL);
	else
		req->f.bvec = req->inline_bvec;

	if (unlikely(!req->f.bvec)) {
		/* fallback under memory pressure */
		req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);
		req->f.mpool_alloc = true;
	} else
		req->f.mpool_alloc = false;

	if (req->ns->buffered_io) {
		if (likely(!req->f.mpool_alloc) &&
		    (req->ns->file->f_mode & FMODE_NOWAIT) &&
		    nvmet_file_execute_io(req, IOCB_NOWAIT))
			return;
		nvmet_file_submit_buffered_io(req);
	} else
		nvmet_file_execute_io(req, 0);
}
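
/*
 * Summary of the bvec allocation strategy above (descriptive only, no
 * new behaviour): requests with at most NVMET_MAX_INLINE_BIOVEC
 * segments use the inline array embedded in the request; larger
 * requests try kmalloc_array(); if that fails under memory pressure,
 * the code falls back to the per-namespace mempool, which guarantees
 * forward progress but caps a single submission at
 * NVMET_MAX_MPOOL_BVEC segments (hence the synchronous chunking in
 * nvmet_file_execute_io()).
 */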

u16 nvmet_file_flush(struct nvmet_req *req)
{
	return errno_to_nvme_status(req, vfs_fsync(req->ns->file, 1));
}
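
/*
 * Note: the "1" passed to vfs_fsync() selects fdatasync-like
 * semantics, flushing file data plus any metadata needed to retrieve
 * it. That is sufficient for the durability an NVMe Flush command
 * promises for previously written data.
 */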

static void nvmet_file_flush_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	nvmet_req_complete(req, nvmet_file_flush(req));
}

static void nvmet_file_execute_flush(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	INIT_WORK(&req->f.work, nvmet_file_flush_work);
	schedule_work(&req->f.work);
}

static void nvmet_file_execute_discard(struct nvmet_req *req)
{
	int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
	struct nvme_dsm_range range;
	loff_t offset, len;
	u16 status = 0;
	int ret;
	int i;

	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
					sizeof(range));
		if (status)
			break;

		offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
		len = le32_to_cpu(range.nlb);
		len <<= req->ns->blksize_shift;
		if (offset + len > req->ns->size) {
			req->error_slba = le64_to_cpu(range.slba);
			status = errno_to_nvme_status(req, -ENOSPC);
			break;
		}

		ret = vfs_fallocate(req->ns->file, mode, offset, len);
		if (ret && ret != -EOPNOTSUPP) {
			req->error_slba = le64_to_cpu(range.slba);
			status = errno_to_nvme_status(req, ret);
			break;
		}
	}

	nvmet_req_complete(req, status);
}
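
/*
 * Worked example for the range conversion above (illustrative values):
 * with blksize_shift = 12, a range of { slba = 8, nlb = 16 } becomes
 *
 *	offset = 8  << 12 = 32768 bytes
 *	len    = 16 << 12 = 65536 bytes
 *
 * i.e. a 64K hole punched at byte offset 32K of the backing file. Note
 * that dsm.nr is zero-based, so the loop runs dsm.nr + 1 times.
 */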

static void nvmet_file_dsm_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_file_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}

static void nvmet_file_execute_dsm(struct nvmet_req *req)
{
	if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
		return;
	INIT_WORK(&req->f.work, nvmet_file_dsm_work);
	schedule_work(&req->f.work);
}

static void nvmet_file_write_zeroes_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
	int mode = FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE;
	loff_t offset;
	loff_t len;
	int ret;

	offset = le64_to_cpu(write_zeroes->slba) << req->ns->blksize_shift;
	len = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
			req->ns->blksize_shift);

	if (unlikely(offset + len > req->ns->size)) {
		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
		return;
	}

	ret = vfs_fallocate(req->ns->file, mode, offset, len);
	nvmet_req_complete(req, ret < 0 ? errno_to_nvme_status(req, ret) : 0);
}
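
/*
 * The NVMe Write Zeroes length field is zero-based, hence the "+ 1"
 * above. Illustrative numbers with blksize_shift = 12:
 *
 *	length = 7  ->  (7 + 1) << 12 = 32768 bytes (8 blocks zeroed)
 *	length = 0  ->  (0 + 1) << 12 = 4096 bytes  (1 block zeroed)
 */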

static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
	schedule_work(&req->f.work);
}

u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->execute = nvmet_file_execute_rw;
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_file_execute_flush;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_file_execute_dsm;
		return 0;
	case nvme_cmd_write_zeroes:
		req->execute = nvmet_file_execute_write_zeroes;
		return 0;
	default:
		pr_err("unhandled cmd for file ns %d on qid %d\n",
				cmd->common.opcode, req->sq->qid);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}
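
/*
 * Dispatch sketch (a simplification of what the nvmet core does, shown
 * for illustration only): the parser installs req->execute, and the
 * core invokes it once the request is ready to run.
 *
 *	u16 status = nvmet_file_parse_io_cmd(req);
 *
 *	if (status)
 *		nvmet_req_complete(req, status);
 *	else
 *		req->execute(req);
 */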