/*
 * NVMe I/O command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/module.h>
#include "nvmet.h"

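/*
 * Shared bio completion handler: map the block layer status onto an NVMe
 * status code and complete the request.  Chained bios are dropped here,
 * but never the inline bio, which is embedded in struct nvmet_req.
 */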
static void nvmet_bio_done(struct bio *bio)
{
	struct nvmet_req *req = bio->bi_private;

	nvmet_req_complete(req,
		bio->bi_status ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);

	if (bio != &req->inline_bio)
		bio_put(bio);
}

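/*
 * The NVMe transfer length is 0's-based (a value of 0 means one logical
 * block), hence the "+ 1" before scaling to a byte count.
 */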
static inline u32 nvmet_rw_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}

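/*
 * Each request embeds a small bio and bvec array, so short I/Os need no
 * bio allocation; larger transfers chain additional bios onto it.
 */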
static void nvmet_inline_bio_init(struct nvmet_req *req)
{
	struct bio *bio = &req->inline_bio;

	bio_init(bio, req->inline_bvec, NVMET_MAX_INLINE_BIOVEC);
}

static void nvmet_execute_rw(struct nvmet_req *req)
{
	int sg_cnt = req->sg_cnt;
	struct scatterlist *sg;
	struct bio *bio;
	sector_t sector;
	blk_qc_t cookie;
	int op, op_flags = 0, i;

	if (!req->sg_cnt) {
		nvmet_req_complete(req, 0);
		return;
	}

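	/*
	 * Writes are issued as REQ_SYNC | REQ_IDLE, and a host-requested
	 * FUA bit is passed through to the backing device as REQ_FUA.
	 */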
	if (req->cmd->rw.opcode == nvme_cmd_write) {
		op = REQ_OP_WRITE;
		op_flags = REQ_SYNC | REQ_IDLE;
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			op_flags |= REQ_FUA;
	} else {
		op = REQ_OP_READ;
	}

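	/* Scale the namespace LBA to 512-byte block layer sectors. */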
	sector = le64_to_cpu(req->cmd->rw.slba);
	sector <<= (req->ns->blksize_shift - 9);

	nvmet_inline_bio_init(req);
	bio = &req->inline_bio;
	bio_set_dev(bio, req->ns->bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;
	bio_set_op_attrs(bio, op, op_flags);

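	/*
	 * Map the scatterlist page by page.  Whenever the current bio is
	 * full, allocate a new one, chain it to the previous bio, and
	 * submit the previous one; the chain shares a single completion.
	 */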
	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			struct bio *prev = bio;

			bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
			bio_set_dev(bio, req->ns->bdev);
			bio->bi_iter.bi_sector = sector;
			bio_set_op_attrs(bio, op, op_flags);

			bio_chain(bio, prev);
			submit_bio(prev);
		}

		sector += sg->length >> 9;
		sg_cnt--;
	}

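	/*
	 * Submit the last bio and opportunistically poll the backing queue
	 * for completion rather than relying solely on the interrupt path.
	 */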
	cookie = submit_bio(bio);

	blk_mq_poll(bdev_get_queue(req->ns->bdev), cookie);
}

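/*
 * Flush is implemented as an empty bio with REQ_PREFLUSH, which tells the
 * backing device to commit its volatile write cache to stable storage.
 */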
static void nvmet_execute_flush(struct nvmet_req *req)
{
	struct bio *bio;

	nvmet_inline_bio_init(req);
	bio = &req->inline_bio;

	bio_set_dev(bio, req->ns->bdev);
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	submit_bio(bio);
}

static u16 nvmet_discard_range(struct nvmet_ns *ns,
		struct nvme_dsm_range *range, struct bio **bio)
{
	if (__blkdev_issue_discard(ns->bdev,
			le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
			le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
			GFP_KERNEL, 0, bio))
		return NVME_SC_INTERNAL | NVME_SC_DNR;
	return 0;
}

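/*
 * Each DSM range is copied out of the SGL and turned into a discard.
 * __blkdev_issue_discard() chains the bios it builds, so one completion
 * covers every range; on error the partially built chain is failed with
 * BLK_STS_IOERR rather than submitted.
 */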
static void nvmet_execute_discard(struct nvmet_req *req)
{
	struct nvme_dsm_range range;
	struct bio *bio = NULL;
	int i;
	u16 status;

	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
				sizeof(range));
		if (status)
			break;

		status = nvmet_discard_range(req->ns, &range, &bio);
		if (status)
			break;
	}

	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		if (status) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		} else {
			submit_bio(bio);
		}
	} else {
		nvmet_req_complete(req, status);
	}
}

static void nvmet_execute_dsm(struct nvmet_req *req)
{
	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}

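/*
 * Write Zeroes is offloaded to __blkdev_issue_zeroout(), which uses the
 * device's native zeroing primitive when available and falls back to
 * writing zero pages otherwise.
 */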
static void nvmet_execute_write_zeroes(struct nvmet_req *req)
{
	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
	struct bio *bio = NULL;
	u16 status = NVME_SC_SUCCESS;
	sector_t sector;
	sector_t nr_sector;

	sector = le64_to_cpu(write_zeroes->slba) <<
		(req->ns->blksize_shift - 9);
	/* The length field is 0's-based: add one before scaling to sectors. */
	nr_sector = ((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
		(req->ns->blksize_shift - 9);

	if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
			GFP_KERNEL, &bio, 0))
		status = NVME_SC_INTERNAL | NVME_SC_DNR;

	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		submit_bio(bio);
	} else {
		nvmet_req_complete(req, status);
	}
}

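/*
 * Called before execution to validate controller state, resolve the
 * target namespace, and pick the execute handler and expected data
 * length for the opcode.
 */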
u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret)) {
		req->ns = NULL;
		return ret;
	}

	req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
	if (unlikely(!req->ns))
		return NVME_SC_INVALID_NS | NVME_SC_DNR;

	switch (cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->execute = nvmet_execute_rw;
		req->data_len = nvmet_rw_len(req);
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_execute_flush;
		req->data_len = 0;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_execute_dsm;
		req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
			sizeof(struct nvme_dsm_range);
		return 0;
	case nvme_cmd_write_zeroes:
		req->execute = nvmet_execute_write_zeroes;
		return 0;
	default:
		pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
		       req->sq->qid);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}