/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

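/*
 * Transfer length for a Get Log Page command: NUMD is a zero's based
 * dword count split across the NUMDU/NUMDL fields, so the length in
 * bytes is ((NUMDU << 16) + NUMDL + 1) * 4.
 */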
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
        u32 len = le16_to_cpu(cmd->get_log_page.numdu);

        len <<= 16;
        len += le16_to_cpu(cmd->get_log_page.numdl);
        /* NUMD is a 0's based value */
        len += 1;
        len *= sizeof(u32);

        return len;
}

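/*
 * Per-namespace SMART log: reads/writes come straight from the backing
 * block device's I/O accounting.  Data units follow the NVMe convention
 * of thousands of 512-byte units, hence the DIV_ROUND_UP by 1000.
 */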
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
                struct nvme_smart_log *slog)
{
        u16 status;
        struct nvmet_ns *ns;
        u64 host_reads, host_writes, data_units_read, data_units_written;

        status = NVME_SC_SUCCESS;
        ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
        if (!ns) {
                status = NVME_SC_INVALID_NS;
                pr_err("nvmet : Could not find namespace id : %d\n",
                                le32_to_cpu(req->cmd->get_log_page.nsid));
                goto out;
        }

        host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
        data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
                sectors[READ]), 1000);
        host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
        data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
                sectors[WRITE]), 1000);

        put_unaligned_le64(host_reads, &slog->host_reads[0]);
        put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
        put_unaligned_le64(host_writes, &slog->host_writes[0]);
        put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
        nvmet_put_namespace(ns);
out:
        return status;
}

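/*
 * Subsystem-wide SMART log: accumulate the same counters over every
 * namespace in the subsystem, walking the RCU-protected list.
 */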
static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
                struct nvme_smart_log *slog)
{
        u16 status;
        u64 host_reads = 0, host_writes = 0;
        u64 data_units_read = 0, data_units_written = 0;
        struct nvmet_ns *ns;
        struct nvmet_ctrl *ctrl;

        status = NVME_SC_SUCCESS;
        ctrl = req->sq->ctrl;

        rcu_read_lock();
        list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
                host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
                data_units_read += DIV_ROUND_UP(
                        part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000);
                host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
                data_units_written += DIV_ROUND_UP(
                        part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
        }
        rcu_read_unlock();

        put_unaligned_le64(host_reads, &slog->host_reads[0]);
        put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
        put_unaligned_le64(host_writes, &slog->host_writes[0]);
        put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

        return status;
}

static u16 nvmet_get_smart_log(struct nvmet_req *req,
                struct nvme_smart_log *slog)
{
        u16 status;

        WARN_ON(req == NULL || slog == NULL);
        if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
                status = nvmet_get_smart_log_all(req, slog);
        else
                status = nvmet_get_smart_log_nsid(req, slog);
        return status;
}

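/*
 * Get Log Page: only the mandatory Error, SMART and Firmware Slot log
 * pages are implemented; fields we cannot fill are returned as zeroes.
 */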
static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
        struct nvme_smart_log *smart_log;
        size_t data_len = nvmet_get_log_page_len(req->cmd);
        void *buf;
        u16 status = 0;

        buf = kzalloc(data_len, GFP_KERNEL);
        if (!buf) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        switch (req->cmd->get_log_page.lid) {
        case NVME_LOG_ERROR:
                /*
                 * We currently never set the More bit in the status field,
                 * so all error log entries are invalid and can be zeroed out.
                 * This is called a minimum viable implementation (TM) of this
                 * mandatory log page.
                 */
                break;
        case NVME_LOG_SMART:
                /*
                 * XXX: fill out actual smart log
                 *
                 * We might have a hard time coming up with useful values for
                 * many of the fields, and even when we have useful data
                 * available (e.g. units or commands read/written) those aren't
                 * persistent over power loss.
                 */
                if (data_len != sizeof(*smart_log)) {
                        status = NVME_SC_INTERNAL;
                        goto err;
                }
                smart_log = buf;
                status = nvmet_get_smart_log(req, smart_log);
                if (status) {
                        memset(buf, '\0', data_len);
                        goto err;
                }
                break;
        case NVME_LOG_FW_SLOT:
                /*
                 * We only support a single firmware slot which always is
                 * active, so we can zero out the whole firmware slot log and
                 * still claim to fully implement this mandatory log page.
                 */
                break;
        default:
                BUG();
        }

        status = nvmet_copy_to_sgl(req, 0, buf, data_len);

err:
        kfree(buf);
out:
        nvmet_req_complete(req, status);
}

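/*
 * Identify Controller: everything here is synthesized by the target;
 * values that cannot be derived from the subsystem or the fabrics ops
 * are set to conservative defaults (or left zero).
 */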
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvme_id_ctrl *id;
        u16 status = 0;
        const char model[] = "Linux";

        id = kzalloc(sizeof(*id), GFP_KERNEL);
        if (!id) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        /* XXX: figure out how to assign real vendor IDs. */
        id->vid = 0;
        id->ssvid = 0;

        bin2hex(id->sn, &ctrl->subsys->serial,
                min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
        memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
        memcpy_and_pad(id->fr, sizeof(id->fr),
                       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

        id->rab = 6;

        /*
         * XXX: figure out how we can assign an IEEE OUI, but until then
         * the safest is to leave it as zeroes.
         */

        /* we support multiple ports and multiple hosts: */
        id->cmic = (1 << 0) | (1 << 1);

        /* no limit on data transfer sizes for now */
        id->mdts = 0;
        id->cntlid = cpu_to_le16(ctrl->cntlid);
        id->ver = cpu_to_le32(ctrl->subsys->ver);

        /* XXX: figure out what to do about RTD3R/RTD3 */
        id->oaes = cpu_to_le32(1 << 8);
        id->ctratt = cpu_to_le32(1 << 0);

        id->oacs = 0;

        /*
         * We don't really have a practical limit on the number of abort
         * commands.  But we don't do anything useful for abort either, so
         * no point in allowing more abort commands than the spec requires.
         */
        id->acl = 3;

        id->aerl = NVMET_ASYNC_EVENTS - 1;

        /* first slot is read-only, only one slot supported */
        id->frmw = (1 << 0) | (1 << 1);
        id->lpa = (1 << 0) | (1 << 2);
        id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
        id->npss = 0;

        /* We support keep-alive timeout in granularity of seconds */
        id->kas = cpu_to_le16(NVMET_KAS);

        id->sqes = (0x6 << 4) | 0x6;    /* min == max == 64 byte SQEs */
        id->cqes = (0x4 << 4) | 0x4;    /* min == max == 16 byte CQEs */

        /* no enforcement soft-limit for maxcmd - pick arbitrary high value */
        id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

        id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
        id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
                        NVME_CTRL_ONCS_WRITE_ZEROES);

        /* XXX: don't report vwc if the underlying device is write through */
        id->vwc = NVME_CTRL_VWC_PRESENT;

        /*
         * We can't support atomic writes bigger than an LBA without support
         * from the backend device.
         */
        id->awun = 0;
        id->awupf = 0;

        id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
        if (ctrl->ops->has_keyed_sgls)
                id->sgls |= cpu_to_le32(1 << 2);
        if (ctrl->ops->sqe_inline_size)
                id->sgls |= cpu_to_le32(1 << 20);

        strcpy(id->subnqn, ctrl->subsys->subsysnqn);

        /* Max command capsule size is sqe + single page of in-capsule data */
        id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
                                  ctrl->ops->sqe_inline_size) / 16);
        /* Max response capsule size is cqe */
        id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

        id->msdbd = ctrl->ops->msdbd;

        /*
         * Meh, we don't really support any power state.  Fake up the same
         * values that qemu does.
         */
        id->psd[0].max_power = cpu_to_le16(0x9c4);
        id->psd[0].entry_lat = cpu_to_le32(0x10);
        id->psd[0].exit_lat = cpu_to_le32(0x4);

        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

        kfree(id);
out:
        nvmet_req_complete(req, status);
}

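/*
 * Identify Namespace: capacity and the single supported LBA format are
 * derived from the backing block device; most other fields stay zero.
 */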
static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
        struct nvmet_ns *ns;
        struct nvme_id_ns *id;
        u16 status = 0;

        ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
        if (!ns) {
                status = NVME_SC_INVALID_NS | NVME_SC_DNR;
                goto out;
        }

        id = kzalloc(sizeof(*id), GFP_KERNEL);
        if (!id) {
                status = NVME_SC_INTERNAL;
                goto out_put_ns;
        }

        /*
         * nuse = ncap = nsze isn't always true, but we have no way to find
         * that out from the underlying device.
         */
        id->ncap = id->nuse = id->nsze =
                cpu_to_le64(ns->size >> ns->blksize_shift);

        /*
         * We just provide a single LBA format that matches what the
         * underlying device reports.
         */
        id->nlbaf = 0;
        id->flbas = 0;

        /*
         * Our namespace might always be shared.  Not just with other
         * controllers, but also with any other user of the block device.
         */
        id->nmic = (1 << 0);

        memcpy(&id->nguid, &ns->nguid, sizeof(uuid_le));

        id->lbaf[0].ds = ns->blksize_shift;

        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

        kfree(id);
out_put_ns:
        nvmet_put_namespace(ns);
out:
        nvmet_req_complete(req, status);
}

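/*
 * Identify Active Namespace ID list: report the NSIDs greater than the
 * NSID given in the command, in ascending order, until the buffer is
 * full.
 */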
static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
        static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_ns *ns;
        u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
        __le32 *list;
        u16 status = 0;
        int i = 0;

        list = kzalloc(buf_size, GFP_KERNEL);
        if (!list) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        rcu_read_lock();
        list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
                if (ns->nsid <= min_nsid)
                        continue;
                list[i++] = cpu_to_le32(ns->nsid);
                if (i == buf_size / sizeof(__le32))
                        break;
        }
        rcu_read_unlock();

        status = nvmet_copy_to_sgl(req, 0, list, buf_size);

        kfree(list);
out:
        nvmet_req_complete(req, status);
}

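/*
 * Emit one Namespace Identification Descriptor (header followed by the
 * identifier payload) into the response SGL, advancing *off past it.
 */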
static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
                                    void *id, off_t *off)
{
        struct nvme_ns_id_desc desc = {
                .nidt = type,
                .nidl = len,
        };
        u16 status;

        status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
        if (status)
                return status;
        *off += sizeof(desc);

        status = nvmet_copy_to_sgl(req, *off, id, len);
        if (status)
                return status;
        *off += len;

        return 0;
}

static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
        struct nvmet_ns *ns;
        u16 status = 0;
        off_t off = 0;

        ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
        if (!ns) {
                status = NVME_SC_INVALID_NS | NVME_SC_DNR;
                goto out;
        }

        if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
                status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
                                                  NVME_NIDT_UUID_LEN,
                                                  &ns->uuid, &off);
                if (status)
                        goto out_put_ns;
        }
        if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
                status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
                                                  NVME_NIDT_NGUID_LEN,
                                                  &ns->nguid, &off);
                if (status)
                        goto out_put_ns;
        }

        /* zero out the rest of the descriptor list buffer */
        if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
                        off) != NVME_IDENTIFY_DATA_SIZE - off)
                status = NVME_SC_INTERNAL | NVME_SC_DNR;
out_put_ns:
        nvmet_put_namespace(ns);
out:
        nvmet_req_complete(req, status);
}

429 * A "mimimum viable" abort implementation: the command is mandatory in the
430 * spec, but we are not required to do any useful work. We couldn't really
431 * do a useful abort, so don't bother even with waiting for the command
432 * to be exectuted and return immediately telling the command to abort
435 static void nvmet_execute_abort(struct nvmet_req *req)
437 nvmet_set_result(req, 1);
438 nvmet_req_complete(req, 0);
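/*
 * Set Features: only Number of Queues and Keep Alive Timer can actually
 * be set; the resulting value is returned in the completion's DW0.
 */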
static void nvmet_execute_set_features(struct nvmet_req *req)
{
        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
        u32 val32;
        u16 status = 0;

        switch (cdw10 & 0xff) {
        case NVME_FEAT_NUM_QUEUES:
                nvmet_set_result(req,
                        (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
                break;
        case NVME_FEAT_KATO:
                val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
                req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
                nvmet_set_result(req, req->sq->ctrl->kato);
                break;
        case NVME_FEAT_HOST_ID:
                status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
                break;
        default:
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                break;
        }

        nvmet_req_complete(req, status);
}

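/*
 * Get Features: only a handful of features return meaningful values;
 * the mandatory ones we have no useful way to implement are stubbed
 * out below.
 */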
static void nvmet_execute_get_features(struct nvmet_req *req)
{
        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
        u16 status = 0;

        switch (cdw10 & 0xff) {
        /*
         * These features are mandatory in the spec, but we don't
         * have a useful way to implement them.  We'll eventually
         * need to come up with some fake values for these.
         */
#if 0
        case NVME_FEAT_ARBITRATION:
                break;
        case NVME_FEAT_POWER_MGMT:
                break;
        case NVME_FEAT_TEMP_THRESH:
                break;
        case NVME_FEAT_ERR_RECOVERY:
                break;
        case NVME_FEAT_IRQ_COALESCE:
                break;
        case NVME_FEAT_IRQ_CONFIG:
                break;
        case NVME_FEAT_WRITE_ATOMIC:
                break;
        case NVME_FEAT_ASYNC_EVENT:
                break;
#endif
        case NVME_FEAT_VOLATILE_WC:
                nvmet_set_result(req, 1);
                break;
        case NVME_FEAT_NUM_QUEUES:
                nvmet_set_result(req,
                        (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
                break;
        case NVME_FEAT_KATO:
                nvmet_set_result(req, req->sq->ctrl->kato * 1000);
                break;
        case NVME_FEAT_HOST_ID:
                /* need 128-bit host identifier flag */
                if (!(req->cmd->common.cdw10[1] & cpu_to_le32(1 << 0))) {
                        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                        break;
                }

                status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
                                sizeof(req->sq->ctrl->hostid));
                break;
        default:
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                break;
        }

        nvmet_req_complete(req, status);
}

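/*
 * Async Event Request: park the command until an event fires; at most
 * NVMET_ASYNC_EVENTS commands may be outstanding at once.
 */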
static void nvmet_execute_async_event(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;

        mutex_lock(&ctrl->lock);
        if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
                mutex_unlock(&ctrl->lock);
                nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
                return;
        }
        ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
        mutex_unlock(&ctrl->lock);

        schedule_work(&ctrl->async_event_work);
}

static void nvmet_execute_keep_alive(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;

        pr_debug("ctrl %d update keep-alive timer for %d secs\n",
                ctrl->cntlid, ctrl->kato);

        mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
        nvmet_req_complete(req, 0);
}

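/*
 * Admin command dispatch: validate the controller state, then set
 * req->execute and req->data_len based on the opcode (and on the log
 * page id or CNS value where the opcode is further qualified).
 */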
u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
        struct nvme_command *cmd = req->cmd;
        u16 ret;

        req->ns = NULL;

        ret = nvmet_check_ctrl_status(req, cmd);
        if (unlikely(ret))
                return ret;

        switch (cmd->common.opcode) {
        case nvme_admin_get_log_page:
                req->data_len = nvmet_get_log_page_len(cmd);

                switch (cmd->get_log_page.lid) {
                case NVME_LOG_ERROR:
                case NVME_LOG_SMART:
                case NVME_LOG_FW_SLOT:
                        req->execute = nvmet_execute_get_log_page;
                        return 0;
                }
                break;
        case nvme_admin_identify:
                req->data_len = NVME_IDENTIFY_DATA_SIZE;
                switch (cmd->identify.cns) {
                case NVME_ID_CNS_NS:
                        req->execute = nvmet_execute_identify_ns;
                        return 0;
                case NVME_ID_CNS_CTRL:
                        req->execute = nvmet_execute_identify_ctrl;
                        return 0;
                case NVME_ID_CNS_NS_ACTIVE_LIST:
                        req->execute = nvmet_execute_identify_nslist;
                        return 0;
                case NVME_ID_CNS_NS_DESC_LIST:
                        req->execute = nvmet_execute_identify_desclist;
                        return 0;
                }
                break;
        case nvme_admin_abort_cmd:
                req->execute = nvmet_execute_abort;
                req->data_len = 0;
                return 0;
        case nvme_admin_set_features:
                req->execute = nvmet_execute_set_features;
                req->data_len = 0;
                return 0;
        case nvme_admin_get_features:
                req->execute = nvmet_execute_get_features;
                req->data_len = 0;
                return 0;
        case nvme_admin_async_event:
                req->execute = nvmet_execute_async_event;
                req->data_len = 0;
                return 0;
        case nvme_admin_keep_alive:
                req->execute = nvmet_execute_keep_alive;
                req->data_len = 0;
                return 0;
        }

        pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
               req->sq->qid);
        return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}