// SPDX-License-Identifier: GPL-2.0
/*
 * Discovery service for the NVMe over Fabrics target.
 * Copyright (C) 2016 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <generated/utsrelease.h>
#include "nvmet.h"

struct nvmet_subsys *nvmet_disc_subsys;

static u64 nvmet_genctr;

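/*
 * Queue a Discovery Log Page Change AEN to @ctrl if it is connected through
 * @port and has not masked the event via the Asynchronous Event
 * Configuration feature.
 */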
static void __nvmet_disc_changed(struct nvmet_port *port,
				 struct nvmet_ctrl *ctrl)
{
	if (ctrl->port != port)
		return;

	if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_DISC_CHANGE))
		return;

	nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
			      NVME_AER_NOTICE_DISC_CHANGED, NVME_LOG_DISC);
}

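/*
 * The discovery log page of @port changed. Bump the generation counter and
 * notify every discovery controller on that port; if @subsys is non-NULL,
 * only hosts allowed to access @subsys are notified.
 */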
void nvmet_port_disc_changed(struct nvmet_port *port,
			     struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	lockdep_assert_held(&nvmet_config_sem);
	nvmet_genctr++;

	mutex_lock(&nvmet_disc_subsys->lock);
	list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
		if (subsys && !nvmet_host_allowed(subsys, ctrl->hostnqn))
			continue;

		__nvmet_disc_changed(port, ctrl);
	}
	mutex_unlock(&nvmet_disc_subsys->lock);

	/* If the transport can signal discovery changes, notify it */
	if (port->tr_ops && port->tr_ops->discovery_chg)
		port->tr_ops->discovery_chg(port);
}

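/*
 * Notify the discovery controllers on @port of a change to @subsys. If
 * @host is non-NULL, restrict the notification to controllers created by
 * that host.
 */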
static void __nvmet_subsys_disc_changed(struct nvmet_port *port,
					struct nvmet_subsys *subsys,
					struct nvmet_host *host)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&nvmet_disc_subsys->lock);
	list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
		if (host && strcmp(nvmet_host_name(host), ctrl->hostnqn))
			continue;

		__nvmet_disc_changed(port, ctrl);
	}
	mutex_unlock(&nvmet_disc_subsys->lock);
}

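/*
 * The access rights of @subsys changed (e.g. a host was added to or removed
 * from its allowed-hosts list). Walk all ports that export @subsys and
 * notify the affected discovery controllers.
 */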
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
			       struct nvmet_host *host)
{
	struct nvmet_port *port;
	struct nvmet_subsys_link *s;

	nvmet_genctr++;

	list_for_each_entry(port, nvmet_ports, global_entry)
		list_for_each_entry(s, &port->subsystems, entry) {
			if (s->subsys != subsys)
				continue;
			__nvmet_subsys_disc_changed(port, subsys, host);
		}
}

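/*
 * Add @port to the referral list of @parent and publish the change in the
 * discovery log.
 */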
void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port)
{
	down_write(&nvmet_config_sem);
	if (list_empty(&port->entry)) {
		list_add_tail(&port->entry, &parent->referrals);
		port->enabled = true;
		nvmet_port_disc_changed(parent, NULL);
	}
	up_write(&nvmet_config_sem);
}

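/*
 * Remove @port from the referral list of @parent and publish the change in
 * the discovery log.
 */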
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port)
{
	down_write(&nvmet_config_sem);
	if (!list_empty(&port->entry)) {
		port->enabled = false;
		list_del_init(&port->entry);
		nvmet_port_disc_changed(parent, NULL);
	}
	up_write(&nvmet_config_sem);
}

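/*
 * Fill entry @numrec of the discovery log page from @port's discovery
 * address, using @subsys_nqn as the subsystem NQN and @type as the
 * subsystem type.
 */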
static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
		struct nvmet_port *port, char *subsys_nqn, char *traddr,
		u8 type, u32 numrec)
{
	struct nvmf_disc_rsp_page_entry *e = &hdr->entries[numrec];

	e->trtype = port->disc_addr.trtype;
	e->adrfam = port->disc_addr.adrfam;
	e->treq = port->disc_addr.treq;
	e->portid = port->disc_addr.portid;
	/* we support only dynamic controllers */
	e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
	e->asqsz = cpu_to_le16(NVME_AQ_DEPTH);
	e->subtype = type;
	memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
	memcpy(e->traddr, traddr, NVMF_TRADDR_SIZE);
	memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
	strncpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE);
}

/*
 * nvmet_set_disc_traddr - set a correct discovery log entry traddr
 *
 * IP based transports (e.g. RDMA) can listen on "any" ipv4/ipv6 address
 * (INADDR_ANY or IN6ADDR_ANY_INIT). The discovery log page traddr reply
 * must not contain that "any" IP address. If the transport implements
 * .disc_traddr, use it; this callback will set the discovery traddr
 * from the req->port address in case the port in question listens
 * on the "any" IP address.
 */
static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port,
		char *traddr)
{
	if (req->ops->disc_traddr)
		req->ops->disc_traddr(req, port, traddr);
	else
		memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
}

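/*
 * Count the discovery log entries visible to the requesting host: one per
 * subsystem on the port that the host may access, plus one per referral.
 */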
static size_t discovery_log_entries(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_subsys_link *p;
	struct nvmet_port *r;
	size_t entries = 0;

	list_for_each_entry(p, &req->port->subsystems, entry) {
		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
			continue;
		entries++;
	}
	list_for_each_entry(r, &req->port->referrals, entry)
		entries++;
	return entries;
}

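/*
 * Build the discovery log page for the requesting host and copy the
 * requested range of it to the host buffer.
 */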
static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
{
	const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_disc_rsp_page_hdr *hdr;
	u64 offset = nvmet_get_log_page_offset(req->cmd);
	size_t data_len = nvmet_get_log_page_len(req->cmd);
	size_t alloc_len;
	struct nvmet_subsys_link *p;
	struct nvmet_port *r;
	u32 numrec = 0;
	u16 status = 0;
	void *buffer;

	if (!nvmet_check_transfer_len(req, data_len))
		return;

	if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
		req->error_loc =
			offsetof(struct nvme_get_log_page_command, lid);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	/* Spec requires dword aligned offsets */
	if (offset & 0x3) {
		req->error_loc =
			offsetof(struct nvme_get_log_page_command, lpo);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	/*
	 * Make sure we're passing at least a buffer of response header size.
	 * If the host-provided data length is less than the header size,
	 * only the number of bytes requested by the host will be sent back.
	 */
	down_read(&nvmet_config_sem);
	alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req);
	buffer = kzalloc(alloc_len, GFP_KERNEL);
	if (!buffer) {
		up_read(&nvmet_config_sem);
		status = NVME_SC_INTERNAL;
		goto out;
	}

	hdr = buffer;
	list_for_each_entry(p, &req->port->subsystems, entry) {
		char traddr[NVMF_TRADDR_SIZE];

		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
			continue;

		nvmet_set_disc_traddr(req, req->port, traddr);
		nvmet_format_discovery_entry(hdr, req->port,
				p->subsys->subsysnqn, traddr,
				NVME_NQN_NVME, numrec);
		numrec++;
	}

	list_for_each_entry(r, &req->port->referrals, entry) {
		nvmet_format_discovery_entry(hdr, r,
				NVME_DISC_SUBSYS_NAME,
				r->disc_addr.traddr,
				NVME_NQN_DISC, numrec);
		numrec++;
	}

	hdr->genctr = cpu_to_le64(nvmet_genctr);
	hdr->numrec = cpu_to_le64(numrec);
	hdr->recfmt = cpu_to_le16(0);

	nvmet_clear_aen_bit(req, NVME_AEN_BIT_DISC_CHANGE);

	up_read(&nvmet_config_sem);

	status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);
	kfree(buffer);
out:
	nvmet_req_complete(req, status);
}

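/*
 * Identify Controller for the discovery controller: report a fixed model
 * string, the kernel release as firmware revision, and the SGL/AEN
 * capabilities the fabrics transport provides.
 */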
static void nvmet_execute_disc_identify(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	const char model[] = "Linux";
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
		req->error_loc = offsetof(struct nvme_identify, cns);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	memset(id->sn, ' ', sizeof(id->sn));
	bin2hex(id->sn, &ctrl->subsys->serial,
		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
	memset(id->fr, ' ', sizeof(id->fr));
	memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	/* no limit on data transfer sizes for now */
	id->mdts = 0;
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);
	id->lpa = (1 << 2);	/* extended data for Get Log Page supported */

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	id->oaes = cpu_to_le32(NVMET_DISC_AEN_CFG_OPTIONAL);

	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

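/*
 * Set Features for the discovery controller: only Keep Alive Timer and
 * Asynchronous Event Configuration are supported.
 */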
static void nvmet_execute_disc_set_features(struct nvmet_req *req)
{
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 stat;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_KATO:
		stat = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		stat = nvmet_set_feat_async_event(req,
						  NVMET_DISC_AEN_CFG_OPTIONAL);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, stat);
}

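/*
 * Get Features counterpart: the same two features are readable; everything
 * else is an invalid field.
 */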
static void nvmet_execute_disc_get_features(struct nvmet_req *req)
{
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 stat = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, stat);
}

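/*
 * Parse an admin command received on a discovery queue and set up its
 * execute handler. Discovery controllers accept only the small command set
 * below; anything else is rejected as an invalid opcode.
 */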
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got cmd %d while not ready\n",
		       cmd->common.opcode);
		req->error_loc =
			offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	switch (cmd->common.opcode) {
	case nvme_admin_set_features:
		req->execute = nvmet_execute_disc_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_disc_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_disc_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_disc_identify;
		return 0;
	default:
		pr_err("unhandled cmd %d\n", cmd->common.opcode);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}

int __init nvmet_init_discovery(void)
{
	nvmet_disc_subsys =
		nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_DISC);
	return PTR_ERR_OR_ZERO(nvmet_disc_subsys);
}

void nvmet_exit_discovery(void)
{
	nvmet_subsys_put(nvmet_disc_subsys);
}