2 * Network block device - make block devices work over TCP
4 * Note that you cannot swap over this thing yet. It seems to work, but
5 * deadlocks sometimes - you cannot swap over TCP in general.
7 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
8 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
10 * This file is released under GPLv2 or later.
12 * (part of code stolen from loop.c)
15 #include <linux/major.h>
17 #include <linux/blkdev.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/sched.h>
21 #include <linux/sched/mm.h>
23 #include <linux/bio.h>
24 #include <linux/stat.h>
25 #include <linux/errno.h>
26 #include <linux/file.h>
27 #include <linux/ioctl.h>
28 #include <linux/mutex.h>
29 #include <linux/compiler.h>
30 #include <linux/err.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
34 #include <linux/net.h>
35 #include <linux/kthread.h>
36 #include <linux/types.h>
37 #include <linux/debugfs.h>
38 #include <linux/blk-mq.h>
40 #include <linux/uaccess.h>
41 #include <asm/types.h>
43 #include <linux/nbd.h>
44 #include <linux/nbd-netlink.h>
45 #include <net/genetlink.h>
47 static DEFINE_IDR(nbd_index_idr);
48 static DEFINE_MUTEX(nbd_index_mutex);
49 static int nbd_total_devices = 0;
54 struct request *pending;
61 struct recv_thread_args {
62 struct work_struct work;
63 struct nbd_device *nbd;
67 struct link_dead_args {
68 struct work_struct work;
72 #define NBD_TIMEDOUT 0
73 #define NBD_DISCONNECT_REQUESTED 1
74 #define NBD_DISCONNECTED 2
75 #define NBD_HAS_PID_FILE 3
76 #define NBD_HAS_CONFIG_REF 4
78 #define NBD_DESTROY_ON_DISCONNECT 6
79 #define NBD_DISCONNECT_ON_CLOSE 7
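/*
 * The defines above are bit numbers, not masks: they index into
 * config->runtime_flags and are manipulated with set_bit()/test_bit()
 * and friends, e.g. set_bit(NBD_DISCONNECTED, &config->runtime_flags).
 */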
83 unsigned long runtime_flags;
84 u64 dead_conn_timeout;
86 struct nbd_sock **socks;
88 atomic_t live_connections;
89 wait_queue_head_t conn_wait;
91 atomic_t recv_threads;
92 wait_queue_head_t recv_wq;
95 #if IS_ENABLED(CONFIG_DEBUG_FS)
96 struct dentry *dbg_dir;
101 struct blk_mq_tag_set tag_set;
104 refcount_t config_refs;
106 struct nbd_config *config;
107 struct mutex config_lock;
108 struct gendisk *disk;
109 struct workqueue_struct *recv_workq;
111 struct list_head list;
112 struct task_struct *task_recv;
113 struct task_struct *task_setup;
116 #define NBD_CMD_REQUEUED 1
119 struct nbd_device *nbd;
128 #if IS_ENABLED(CONFIG_DEBUG_FS)
129 static struct dentry *nbd_dbg_dir;
132 #define nbd_name(nbd) ((nbd)->disk->disk_name)
134 #define NBD_MAGIC 0x68797548
136 #define NBD_DEF_BLKSIZE 1024
138 static unsigned int nbds_max = 16;
139 static int max_part = 16;
140 static int part_shift;
142 static int nbd_dev_dbg_init(struct nbd_device *nbd);
143 static void nbd_dev_dbg_close(struct nbd_device *nbd);
144 static void nbd_config_put(struct nbd_device *nbd);
145 static void nbd_connect_reply(struct genl_info *info, int index);
146 static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
147 static void nbd_dead_link_work(struct work_struct *work);
148 static void nbd_disconnect_and_put(struct nbd_device *nbd);
150 static inline struct device *nbd_to_dev(struct nbd_device *nbd)
152 return disk_to_dev(nbd->disk);
155 static void nbd_requeue_cmd(struct nbd_cmd *cmd)
157 struct request *req = blk_mq_rq_from_pdu(cmd);
159 if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
160 blk_mq_requeue_request(req, true);
163 #define NBD_COOKIE_BITS 32
165 static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
167 struct request *req = blk_mq_rq_from_pdu(cmd);
168 u32 tag = blk_mq_unique_tag(req);
169 u64 cookie = cmd->cmd_cookie;
171 return (cookie << NBD_COOKIE_BITS) | tag;
174 static u32 nbd_handle_to_tag(u64 handle)
179 static u32 nbd_handle_to_cookie(u64 handle)
181 return (u32)(handle >> NBD_COOKIE_BITS);
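/*
 * A short sketch of the round-trip implemented by the helpers above:
 * the 64-bit wire handle packs the per-socket cookie into the high 32
 * bits and the unique blk-mq tag into the low 32 bits, so both can be
 * recovered from a reply:
 *
 *	u64 handle = ((u64)cookie << NBD_COOKIE_BITS) | tag;
 *	u32 tag    = (u32)handle;
 *	u32 cookie = (u32)(handle >> NBD_COOKIE_BITS);
 *
 * The cookie differs across reconnects of a socket, which is what lets
 * nbd_read_stat() reject replies that belong to an old connection.
 */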
184 static const char *nbdcmd_to_ascii(int cmd)
187 case NBD_CMD_READ: return "read";
188 case NBD_CMD_WRITE: return "write";
189 case NBD_CMD_DISC: return "disconnect";
190 case NBD_CMD_FLUSH: return "flush";
191 case NBD_CMD_TRIM: return "trim/discard";
196 static ssize_t pid_show(struct device *dev,
197 struct device_attribute *attr, char *buf)
199 struct gendisk *disk = dev_to_disk(dev);
200 struct nbd_device *nbd = (struct nbd_device *)disk->private_data;
202 return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
205 static const struct device_attribute pid_attr = {
206 .attr = { .name = "pid", .mode = S_IRUGO},
210 static void nbd_dev_remove(struct nbd_device *nbd)
212 struct gendisk *disk = nbd->disk;
213 struct request_queue *q;
218 blk_cleanup_queue(q);
219 blk_mq_free_tag_set(&nbd->tag_set);
220 disk->private_data = NULL;
226 static void nbd_put(struct nbd_device *nbd)
228 if (refcount_dec_and_mutex_lock(&nbd->refs,
230 idr_remove(&nbd_index_idr, nbd->index);
232 mutex_unlock(&nbd_index_mutex);
236 static int nbd_disconnected(struct nbd_config *config)
238 return test_bit(NBD_DISCONNECTED, &config->runtime_flags) ||
239 test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
242 static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
245 if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
246 struct link_dead_args *args;
247 args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
249 INIT_WORK(&args->work, nbd_dead_link_work);
250 args->index = nbd->index;
251 queue_work(system_wq, &args->work);
255 kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
256 atomic_dec(&nbd->config->live_connections);
259 nsock->pending = NULL;
263 static void nbd_size_clear(struct nbd_device *nbd)
265 if (nbd->config->bytesize) {
266 set_capacity(nbd->disk, 0);
267 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
271 static void nbd_size_update(struct nbd_device *nbd, bool start)
273 struct nbd_config *config = nbd->config;
274 struct block_device *bdev = bdget_disk(nbd->disk, 0);
276 blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
277 blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
278 set_capacity(nbd->disk, config->bytesize >> 9);
281 bd_set_size(bdev, config->bytesize);
283 set_blocksize(bdev, config->blksize);
285 bdev->bd_invalidated = 1;
288 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
291 static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
294 struct nbd_config *config = nbd->config;
295 config->blksize = blocksize;
296 config->bytesize = blocksize * nr_blocks;
297 if (nbd->task_recv != NULL)
298 nbd_size_update(nbd, false);
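/*
 * Worked example for nbd_size_set() above: blocksize = 4096 and
 * nr_blocks = 262144 gives bytesize = 4096 * 262144 = 1 GiB, which
 * nbd_size_update() exposes as bytesize >> 9 = 2097152 512-byte
 * sectors via set_capacity().
 */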
301 static void nbd_complete_rq(struct request *req)
303 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
305 dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", cmd,
306 cmd->status ? "failed" : "done");
308 blk_mq_end_request(req, cmd->status);
312 * Forcibly shut down the socket, causing all listeners to error out.
314 static void sock_shutdown(struct nbd_device *nbd)
316 struct nbd_config *config = nbd->config;
319 if (config->num_connections == 0)
321 if (test_and_set_bit(NBD_DISCONNECTED, &config->runtime_flags))
324 for (i = 0; i < config->num_connections; i++) {
325 struct nbd_sock *nsock = config->socks[i];
326 mutex_lock(&nsock->tx_lock);
327 nbd_mark_nsock_dead(nbd, nsock, 0);
328 mutex_unlock(&nsock->tx_lock);
330 dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
333 static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
336 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
337 struct nbd_device *nbd = cmd->nbd;
338 struct nbd_config *config;
340 if (!refcount_inc_not_zero(&nbd->config_refs)) {
341 cmd->status = BLK_STS_TIMEOUT;
342 return BLK_EH_HANDLED;
344 config = nbd->config;
346 if (!mutex_trylock(&cmd->lock)) {
348 return BLK_EH_RESET_TIMER;
351 if (config->num_connections > 1) {
352 dev_err_ratelimited(nbd_to_dev(nbd),
353 "Connection timed out, retrying\n");
355 * Hooray, we have more connections; requeue this IO and the submit
356 * path will put it on a real connection.
358 if (config->socks && config->num_connections > 1) {
359 if (cmd->index < config->num_connections) {
360 struct nbd_sock *nsock =
361 config->socks[cmd->index];
362 mutex_lock(&nsock->tx_lock);
363 /* We can have multiple outstanding requests, so
364 * we don't want to mark the nsock dead if we've
365 * already reconnected with a new socket, so we
366 * only mark it dead if it's the same socket we were using. */
369 if (cmd->cookie == nsock->cookie)
370 nbd_mark_nsock_dead(nbd, nsock, 1);
371 mutex_unlock(&nsock->tx_lock);
373 mutex_unlock(&cmd->lock);
374 nbd_requeue_cmd(cmd);
376 return BLK_EH_NOT_HANDLED;
379 dev_err_ratelimited(nbd_to_dev(nbd),
380 "Connection timed out\n");
382 set_bit(NBD_TIMEDOUT, &config->runtime_flags);
383 cmd->status = BLK_STS_IOERR;
384 mutex_unlock(&cmd->lock);
388 return BLK_EH_HANDLED;
392 * Send or receive a packet.
394 static int sock_xmit(struct nbd_device *nbd, int index, int send,
395 struct iov_iter *iter, int msg_flags, int *sent)
397 struct nbd_config *config = nbd->config;
398 struct socket *sock = config->socks[index]->sock;
401 unsigned int noreclaim_flag;
403 if (unlikely(!sock)) {
404 dev_err_ratelimited(disk_to_dev(nbd->disk),
405 "Attempted %s on closed socket in sock_xmit\n",
406 (send ? "send" : "recv"));
410 msg.msg_iter = *iter;
412 noreclaim_flag = memalloc_noreclaim_save();
414 sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
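/* GFP_NOIO plus __GFP_MEMALLOC (with the memalloc_noreclaim_save()
 * above) lets socket allocations dip into the emergency reserves
 * instead of recursing into reclaim, which matters when this device
 * is itself backing reclaim writeback.
 */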
417 msg.msg_control = NULL;
418 msg.msg_controllen = 0;
419 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
422 result = sock_sendmsg(sock, &msg);
424 result = sock_recvmsg(sock, &msg, msg.msg_flags);
428 result = -EPIPE; /* short read */
433 } while (msg_data_left(&msg));
435 memalloc_noreclaim_restore(noreclaim_flag);
441 * Different settings for sk->sk_sndtimeo can result in different return values
442 * if there is a signal pending when we enter sendmsg, because reasons?
444 static inline int was_interrupted(int result)
446 return result == -ERESTARTSYS || result == -EINTR;
449 /* always call with the tx_lock held */
450 static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
452 struct request *req = blk_mq_rq_from_pdu(cmd);
453 struct nbd_config *config = nbd->config;
454 struct nbd_sock *nsock = config->socks[index];
456 struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
457 struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
458 struct iov_iter from;
459 unsigned long size = blk_rq_bytes(req);
463 u32 nbd_cmd_flags = 0;
464 int sent = nsock->sent, skip = 0;
466 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
468 switch (req_op(req)) {
473 type = NBD_CMD_FLUSH;
476 type = NBD_CMD_WRITE;
485 if (rq_data_dir(req) == WRITE &&
486 (config->flags & NBD_FLAG_READ_ONLY)) {
487 dev_err_ratelimited(disk_to_dev(nbd->disk),
488 "Write on read-only\n");
492 if (req->cmd_flags & REQ_FUA)
493 nbd_cmd_flags |= NBD_CMD_FLAG_FUA;
495 /* We did a partial send previously, and we at least sent the whole
496 * request struct, so just go and send the rest of the pages in the request. */
500 if (sent >= sizeof(request)) {
501 skip = sent - sizeof(request);
504 iov_iter_advance(&from, sent);
509 cmd->cookie = nsock->cookie;
510 request.type = htonl(type | nbd_cmd_flags);
511 if (type != NBD_CMD_FLUSH) {
512 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
513 request.len = htonl(size);
515 handle = nbd_cmd_handle(cmd);
516 memcpy(request.handle, &handle, sizeof(handle));
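/*
 * For reference, the header built above is the fixed 28-byte NBD
 * request header (nbd_init() asserts the size with a BUILD_BUG_ON),
 * with all multi-byte fields in network byte order:
 *
 *	magic  (4 bytes)  NBD_REQUEST_MAGIC
 *	type   (4 bytes)  command type | command flags
 *	handle (8 bytes)  cookie << 32 | blk-mq tag (see nbd_cmd_handle())
 *	from   (8 bytes)  byte offset into the export
 *	len    (4 bytes)  payload length in bytes
 */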
518 dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
519 cmd, nbdcmd_to_ascii(type),
520 (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
521 result = sock_xmit(nbd, index, 1, &from,
522 (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
524 if (was_interrupted(result)) {
525 /* If we haven't sent anything we can just return BUSY,
526 * however if we have sent something we need to make
527 * sure we only allow this req to be sent until we are completely done. */
531 nsock->pending = req;
534 set_bit(NBD_CMD_REQUEUED, &cmd->flags);
535 return BLK_STS_RESOURCE;
537 dev_err_ratelimited(disk_to_dev(nbd->disk),
538 "Send control failed (result %d)\n", result);
542 if (type != NBD_CMD_WRITE)
547 struct bio *next = bio->bi_next;
548 struct bvec_iter iter;
551 bio_for_each_segment(bvec, bio, iter) {
552 bool is_last = !next && bio_iter_last(bvec, iter);
553 int flags = is_last ? 0 : MSG_MORE;
555 dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
557 iov_iter_bvec(&from, ITER_BVEC | WRITE,
558 &bvec, 1, bvec.bv_len);
560 if (skip >= iov_iter_count(&from)) {
561 skip -= iov_iter_count(&from);
564 iov_iter_advance(&from, skip);
567 result = sock_xmit(nbd, index, 1, &from, flags, &sent);
569 if (was_interrupted(result)) {
570 /* We've already sent the header, so we
571 * have no choice but to set pending and return BUSY. */
574 nsock->pending = req;
576 set_bit(NBD_CMD_REQUEUED, &cmd->flags);
577 return BLK_STS_RESOURCE;
579 dev_err(disk_to_dev(nbd->disk),
580 "Send data failed (result %d)\n",
585 * The completion might already have come in,
586 * so break for the last one instead of letting
587 * the iterator do it. This prevents use-after-free when the request is completed.
596 nsock->pending = NULL;
601 /* An ERR_PTR return = something went wrong, inform userspace */
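/*
 * The reply read below is the fixed 16-byte NBD reply header, again
 * in network byte order:
 *
 *	magic  (4 bytes)  NBD_REPLY_MAGIC
 *	error  (4 bytes)  0 on success, an error value otherwise
 *	handle (8 bytes)  echoed back from the matching request
 *
 * The echoed handle is what lets us look the request up by blk-mq tag
 * and reject stale replies by comparing cookies.
 */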
602 static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
604 struct nbd_config *config = nbd->config;
606 struct nbd_reply reply;
608 struct request *req = NULL;
612 struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
617 iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
618 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
620 if (!nbd_disconnected(config))
621 dev_err(disk_to_dev(nbd->disk),
622 "Receive control failed (result %d)\n", result);
623 return ERR_PTR(result);
626 if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
627 dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
628 (unsigned long)ntohl(reply.magic));
629 return ERR_PTR(-EPROTO);
632 memcpy(&handle, reply.handle, sizeof(handle));
633 tag = nbd_handle_to_tag(handle);
634 hwq = blk_mq_unique_tag_to_hwq(tag);
635 if (hwq < nbd->tag_set.nr_hw_queues)
636 req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
637 blk_mq_unique_tag_to_tag(tag));
638 if (!req || !blk_mq_request_started(req)) {
639 dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
641 return ERR_PTR(-ENOENT);
643 cmd = blk_mq_rq_to_pdu(req);
645 mutex_lock(&cmd->lock);
646 if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
647 dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
648 req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
652 if (cmd->status != BLK_STS_OK) {
653 dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
658 if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
659 dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
664 if (ntohl(reply.error)) {
665 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
667 cmd->status = BLK_STS_IOERR;
671 dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
672 if (rq_data_dir(req) != WRITE) {
673 struct req_iterator iter;
676 rq_for_each_segment(bvec, req, iter) {
677 iov_iter_bvec(&to, ITER_BVEC | READ,
678 &bvec, 1, bvec.bv_len);
679 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
681 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
684 * If we've disconnected, or we only have one
685 * connection, then we need to make sure we
686 * complete this request; otherwise, error out
687 * and let the timeout code handle resubmitting
688 * this request onto another connection.
690 if (nbd_disconnected(config) ||
691 config->num_connections <= 1) {
692 cmd->status = BLK_STS_IOERR;
698 dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
703 mutex_unlock(&cmd->lock);
704 return ret ? ERR_PTR(ret) : cmd;
707 static void recv_work(struct work_struct *work)
709 struct recv_thread_args *args = container_of(work,
710 struct recv_thread_args,
712 struct nbd_device *nbd = args->nbd;
713 struct nbd_config *config = nbd->config;
717 cmd = nbd_read_stat(nbd, args->index);
719 struct nbd_sock *nsock = config->socks[args->index];
721 mutex_lock(&nsock->tx_lock);
722 nbd_mark_nsock_dead(nbd, nsock, 1);
723 mutex_unlock(&nsock->tx_lock);
727 blk_mq_complete_request(blk_mq_rq_from_pdu(cmd));
730 atomic_dec(&config->recv_threads);
731 wake_up(&config->recv_wq);
735 static void nbd_clear_req(struct request *req, void *data, bool reserved)
739 if (!blk_mq_request_started(req))
741 cmd = blk_mq_rq_to_pdu(req);
742 cmd->status = BLK_STS_IOERR;
743 blk_mq_complete_request(req);
746 static void nbd_clear_que(struct nbd_device *nbd)
748 blk_mq_quiesce_queue(nbd->disk->queue);
749 blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
750 blk_mq_unquiesce_queue(nbd->disk->queue);
751 dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
754 static int find_fallback(struct nbd_device *nbd, int index)
756 struct nbd_config *config = nbd->config;
758 struct nbd_sock *nsock = config->socks[index];
759 int fallback = nsock->fallback_index;
761 if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
764 if (config->num_connections <= 1) {
765 dev_err_ratelimited(disk_to_dev(nbd->disk),
766 "Attempted send on invalid socket\n");
770 if (fallback >= 0 && fallback < config->num_connections &&
771 !config->socks[fallback]->dead)
774 if (nsock->fallback_index < 0 ||
775 nsock->fallback_index >= config->num_connections ||
776 config->socks[nsock->fallback_index]->dead) {
778 for (i = 0; i < config->num_connections; i++) {
781 if (!config->socks[i]->dead) {
786 nsock->fallback_index = new_index;
788 dev_err_ratelimited(disk_to_dev(nbd->disk),
789 "Dead connection, failed to find a fallback\n");
793 new_index = nsock->fallback_index;
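/*
 * With a dead_conn_timeout configured, wait_for_reconnect() below
 * waits for a connection to come back and returns nonzero if at
 * least one is live afterwards; zero tells the caller to give up
 * and error the I/O out.
 */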
797 static int wait_for_reconnect(struct nbd_device *nbd)
799 struct nbd_config *config = nbd->config;
800 if (!config->dead_conn_timeout)
802 if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
804 wait_event_timeout(config->conn_wait,
805 atomic_read(&config->live_connections),
806 config->dead_conn_timeout);
807 return atomic_read(&config->live_connections);
810 static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
812 struct request *req = blk_mq_rq_from_pdu(cmd);
813 struct nbd_device *nbd = cmd->nbd;
814 struct nbd_config *config;
815 struct nbd_sock *nsock;
818 if (!refcount_inc_not_zero(&nbd->config_refs)) {
819 dev_err_ratelimited(disk_to_dev(nbd->disk),
820 "Socks array is empty\n");
821 blk_mq_start_request(req);
824 config = nbd->config;
826 if (index >= config->num_connections) {
827 dev_err_ratelimited(disk_to_dev(nbd->disk),
828 "Attempted send on invalid socket\n");
830 blk_mq_start_request(req);
833 cmd->status = BLK_STS_OK;
835 nsock = config->socks[index];
836 mutex_lock(&nsock->tx_lock);
838 int old_index = index;
839 index = find_fallback(nbd, index);
840 mutex_unlock(&nsock->tx_lock);
842 if (wait_for_reconnect(nbd)) {
846 /* All the sockets should already be down at this point;
847 * we just want to make sure that DISCONNECTED is set so
848 * any requests that come in that were queued waiting
849 * for the reconnect timer don't trigger the timer again
850 * and instead just error out.
854 blk_mq_start_request(req);
860 /* Handle the case that we have a pending request that was partially
861 * transmitted and _has_ to be serviced first. We need to call requeue
862 * here so that it gets put _after_ the request that is already on the queue. */
865 blk_mq_start_request(req);
866 if (unlikely(nsock->pending && nsock->pending != req)) {
867 nbd_requeue_cmd(cmd);
872 * Some failures are related to the link going down, so anything that
873 * returns EAGAIN can be retried on a different socket.
875 ret = nbd_send_cmd(nbd, cmd, index);
876 if (ret == -EAGAIN) {
877 dev_err_ratelimited(disk_to_dev(nbd->disk),
878 "Request send failed, requeueing\n");
879 nbd_mark_nsock_dead(nbd, nsock, 1);
880 nbd_requeue_cmd(cmd);
884 mutex_unlock(&nsock->tx_lock);
889 static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
890 const struct blk_mq_queue_data *bd)
892 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
896 * Since we look at the bios to send the request over the network, we
897 * need to make sure the completion work doesn't mark this request done
898 * before we are done doing our send. This keeps us from dereferencing
899 * freed data if we have particularly fast completions (i.e. we get the
900 * completion before we exit sock_xmit on the last bvec) or in the case
901 * that the server is misbehaving (or there was an error) before we're
902 * done sending everything over the wire.
904 mutex_lock(&cmd->lock);
905 clear_bit(NBD_CMD_REQUEUED, &cmd->flags);
907 /* We can be called directly from the user space process, which means we
908 * could possibly have signals pending, so our sendmsg will fail. In
909 * this case we need to return that we are busy; otherwise, error out as appropriate. */
912 ret = nbd_handle_cmd(cmd, hctx->queue_num);
917 mutex_unlock(&cmd->lock);
922 static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
928 sock = sockfd_lookup(fd, err);
932 if (sock->ops->shutdown == sock_no_shutdown) {
933 dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
942 static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
945 struct nbd_config *config = nbd->config;
947 struct nbd_sock **socks;
948 struct nbd_sock *nsock;
951 sock = nbd_get_socket(nbd, arg, &err);
956 * We need to make sure we don't get any errant requests while we're
957 * reallocating the ->socks array.
959 blk_mq_freeze_queue(nbd->disk->queue);
961 if (!netlink && !nbd->task_setup &&
962 !test_bit(NBD_BOUND, &config->runtime_flags))
963 nbd->task_setup = current;
966 (nbd->task_setup != current ||
967 test_bit(NBD_BOUND, &config->runtime_flags))) {
968 dev_err(disk_to_dev(nbd->disk),
969 "Device being setup by another task");
974 nsock = kzalloc(sizeof(*nsock), GFP_KERNEL);
980 socks = krealloc(config->socks, (config->num_connections + 1) *
981 sizeof(struct nbd_sock *), GFP_KERNEL);
988 config->socks = socks;
990 nsock->fallback_index = -1;
992 mutex_init(&nsock->tx_lock);
994 nsock->pending = NULL;
997 socks[config->num_connections++] = nsock;
998 atomic_inc(&config->live_connections);
999 blk_mq_unfreeze_queue(nbd->disk->queue);
1004 blk_mq_unfreeze_queue(nbd->disk->queue);
1009 static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
1011 struct nbd_config *config = nbd->config;
1012 struct socket *sock, *old;
1013 struct recv_thread_args *args;
1017 sock = nbd_get_socket(nbd, arg, &err);
1021 args = kzalloc(sizeof(*args), GFP_KERNEL);
1027 for (i = 0; i < config->num_connections; i++) {
1028 struct nbd_sock *nsock = config->socks[i];
1033 mutex_lock(&nsock->tx_lock);
1035 mutex_unlock(&nsock->tx_lock);
1038 sk_set_memalloc(sock->sk);
1039 if (nbd->tag_set.timeout)
1040 sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
1041 atomic_inc(&config->recv_threads);
1042 refcount_inc(&nbd->config_refs);
1044 nsock->fallback_index = -1;
1046 nsock->dead = false;
1047 INIT_WORK(&args->work, recv_work);
1051 mutex_unlock(&nsock->tx_lock);
1054 clear_bit(NBD_DISCONNECTED, &config->runtime_flags);
1056 /* We take the tx_lock in an error path in recv_work, so we
1057 * need to queue_work() outside of the tx_lock.
1059 queue_work(nbd->recv_workq, &args->work);
1061 atomic_inc(&config->live_connections);
1062 wake_up(&config->conn_wait);
1070 static void nbd_bdev_reset(struct block_device *bdev)
1072 if (bdev->bd_openers > 1)
1074 bd_set_size(bdev, 0);
1076 blkdev_reread_part(bdev);
1077 bdev->bd_invalidated = 1;
1081 static void nbd_parse_flags(struct nbd_device *nbd)
1083 struct nbd_config *config = nbd->config;
1084 if (config->flags & NBD_FLAG_READ_ONLY)
1085 set_disk_ro(nbd->disk, true);
1087 set_disk_ro(nbd->disk, false);
1088 if (config->flags & NBD_FLAG_SEND_TRIM)
1089 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
1090 if (config->flags & NBD_FLAG_SEND_FLUSH) {
1091 if (config->flags & NBD_FLAG_SEND_FUA)
1092 blk_queue_write_cache(nbd->disk->queue, true, true);
1094 blk_queue_write_cache(nbd->disk->queue, true, false);
1097 blk_queue_write_cache(nbd->disk->queue, false, false);
1100 static void send_disconnects(struct nbd_device *nbd)
1102 struct nbd_config *config = nbd->config;
1103 struct nbd_request request = {
1104 .magic = htonl(NBD_REQUEST_MAGIC),
1105 .type = htonl(NBD_CMD_DISC),
1107 struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
1108 struct iov_iter from;
1111 for (i = 0; i < config->num_connections; i++) {
1112 struct nbd_sock *nsock = config->socks[i];
1114 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
1115 mutex_lock(&nsock->tx_lock);
1116 ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
1118 dev_err(disk_to_dev(nbd->disk),
1119 "Send disconnect failed %d\n", ret);
1120 mutex_unlock(&nsock->tx_lock);
1124 static int nbd_disconnect(struct nbd_device *nbd)
1126 struct nbd_config *config = nbd->config;
1128 dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
1129 set_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
1130 send_disconnects(nbd);
1134 static void nbd_clear_sock(struct nbd_device *nbd)
1138 nbd->task_setup = NULL;
1141 static void nbd_config_put(struct nbd_device *nbd)
1143 if (refcount_dec_and_mutex_lock(&nbd->config_refs,
1144 &nbd->config_lock)) {
1145 struct nbd_config *config = nbd->config;
1146 nbd_dev_dbg_close(nbd);
1147 nbd_size_clear(nbd);
1148 if (test_and_clear_bit(NBD_HAS_PID_FILE,
1149 &config->runtime_flags))
1150 device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
1151 nbd->task_recv = NULL;
1152 nbd_clear_sock(nbd);
1153 if (config->num_connections) {
1155 for (i = 0; i < config->num_connections; i++) {
1156 sockfd_put(config->socks[i]->sock);
1157 kfree(config->socks[i]);
1159 kfree(config->socks);
1164 if (nbd->recv_workq)
1165 destroy_workqueue(nbd->recv_workq);
1166 nbd->recv_workq = NULL;
1168 nbd->tag_set.timeout = 0;
1169 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
1171 mutex_unlock(&nbd->config_lock);
1173 module_put(THIS_MODULE);
1177 static int nbd_start_device(struct nbd_device *nbd)
1179 struct nbd_config *config = nbd->config;
1180 int num_connections = config->num_connections;
1187 if (num_connections > 1 &&
1188 !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
1189 dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
1193 nbd->recv_workq = alloc_workqueue("knbd%d-recv",
1194 WQ_MEM_RECLAIM | WQ_HIGHPRI |
1195 WQ_UNBOUND, 0, nbd->index);
1196 if (!nbd->recv_workq) {
1197 dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
1201 blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
1202 nbd->task_recv = current;
1204 nbd_parse_flags(nbd);
1206 error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
1208 dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
1211 set_bit(NBD_HAS_PID_FILE, &config->runtime_flags);
1213 nbd_dev_dbg_init(nbd);
1214 for (i = 0; i < num_connections; i++) {
1215 struct recv_thread_args *args;
1217 args = kzalloc(sizeof(*args), GFP_KERNEL);
1221 * If num_connections is m (m > 2), and the kzallocs for
1222 * connections 1..n (1 < n < m) succeed but the one for
1223 * connection n + 1 fails, we still have n recv threads
1224 * running. So flush the workqueue here to prevent a recv
1225 * thread from dropping the last config ref and trying to
1226 * destroy the workqueue from inside the workqueue.
1229 flush_workqueue(nbd->recv_workq);
1232 sk_set_memalloc(config->socks[i]->sock->sk);
1233 if (nbd->tag_set.timeout)
1234 config->socks[i]->sock->sk->sk_sndtimeo =
1235 nbd->tag_set.timeout;
1236 atomic_inc(&config->recv_threads);
1237 refcount_inc(&nbd->config_refs);
1238 INIT_WORK(&args->work, recv_work);
1241 queue_work(nbd->recv_workq, &args->work);
1243 nbd_size_update(nbd, true);
1247 static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
1249 struct nbd_config *config = nbd->config;
1252 ret = nbd_start_device(nbd);
1257 bdev->bd_invalidated = 1;
1258 mutex_unlock(&nbd->config_lock);
1259 ret = wait_event_interruptible(config->recv_wq,
1260 atomic_read(&config->recv_threads) == 0);
1266 flush_workqueue(nbd->recv_workq);
1267 mutex_lock(&nbd->config_lock);
1268 bd_set_size(bdev, 0);
1269 /* user requested, ignore socket errors */
1270 if (test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags))
1272 if (test_bit(NBD_TIMEDOUT, &config->runtime_flags))
1277 static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
1278 struct block_device *bdev)
1280 nbd_clear_sock(nbd);
1281 __invalidate_device(bdev, true);
1282 nbd_bdev_reset(bdev);
1283 if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
1284 &nbd->config->runtime_flags))
1285 nbd_config_put(nbd);
1288 static bool nbd_is_valid_blksize(unsigned long blksize)
1290 if (!blksize || !is_power_of_2(blksize) || blksize < 512 ||
1291 blksize > PAGE_SIZE)
1296 /* Must be called with config_lock held */
1297 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
1298 unsigned int cmd, unsigned long arg)
1300 struct nbd_config *config = nbd->config;
1303 case NBD_DISCONNECT:
1304 return nbd_disconnect(nbd);
1305 case NBD_CLEAR_SOCK:
1306 nbd_clear_sock_ioctl(nbd, bdev);
1309 return nbd_add_socket(nbd, arg, false);
1310 case NBD_SET_BLKSIZE:
1312 arg = NBD_DEF_BLKSIZE;
1313 if (!nbd_is_valid_blksize(arg))
1315 nbd_size_set(nbd, arg,
1316 div_s64(config->bytesize, arg));
1319 nbd_size_set(nbd, config->blksize,
1320 div_s64(arg, config->blksize));
1322 case NBD_SET_SIZE_BLOCKS:
1323 nbd_size_set(nbd, config->blksize, arg);
1325 case NBD_SET_TIMEOUT:
1327 nbd->tag_set.timeout = arg * HZ;
1328 blk_queue_rq_timeout(nbd->disk->queue, arg * HZ);
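/* arg is a timeout in seconds; nbd->tag_set.timeout and the block
 * layer request timeout are in jiffies, hence the HZ scaling above.
 */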
1333 config->flags = arg;
1336 return nbd_start_device_ioctl(nbd, bdev);
1339 * This is for compatibility only. The queue is always cleared
1340 * by NBD_DO_IT or NBD_CLEAR_SOCK.
1343 case NBD_PRINT_DEBUG:
1345 * For compatibility only, we no longer keep a list of
1346 * outstanding requests.
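/*
 * A minimal userspace sketch of the classic ioctl setup sequence
 * handled above (error handling omitted; "sock" is assumed to be a
 * connected socket fd to an NBD server, and /dev/nbd0 is just an
 * example node):
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/nbd.h>
 *
 *	int dev = open("/dev/nbd0", O_RDWR);
 *	ioctl(dev, NBD_SET_BLKSIZE, 4096UL);
 *	ioctl(dev, NBD_SET_SIZE_BLOCKS, nr_blocks);
 *	ioctl(dev, NBD_SET_SOCK, (unsigned long)sock);
 *	ioctl(dev, NBD_DO_IT);          (blocks until disconnect)
 *	ioctl(dev, NBD_CLEAR_SOCK);
 *
 * NBD_DO_IT does not return until the session ends, which is why
 * clients traditionally fork before issuing it.
 */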
1353 static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
1354 unsigned int cmd, unsigned long arg)
1356 struct nbd_device *nbd = bdev->bd_disk->private_data;
1357 struct nbd_config *config = nbd->config;
1358 int error = -EINVAL;
1360 if (!capable(CAP_SYS_ADMIN))
1363 /* The block layer will pass back some non-nbd ioctls in case we have
1364 * special handling for them, but we don't, so just return an error.
1366 if (_IOC_TYPE(cmd) != 0xab)
1369 mutex_lock(&nbd->config_lock);
1371 /* Don't allow ioctl operations on an nbd device that was created with
1372 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
1374 if (!test_bit(NBD_BOUND, &config->runtime_flags) ||
1375 (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
1376 error = __nbd_ioctl(bdev, nbd, cmd, arg);
1378 dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
1379 mutex_unlock(&nbd->config_lock);
1383 static struct nbd_config *nbd_alloc_config(void)
1385 struct nbd_config *config;
1387 if (!try_module_get(THIS_MODULE))
1388 return ERR_PTR(-ENODEV);
1390 config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
1392 module_put(THIS_MODULE);
1393 return ERR_PTR(-ENOMEM);
1396 atomic_set(&config->recv_threads, 0);
1397 init_waitqueue_head(&config->recv_wq);
1398 init_waitqueue_head(&config->conn_wait);
1399 config->blksize = NBD_DEF_BLKSIZE;
1400 atomic_set(&config->live_connections, 0);
1404 static int nbd_open(struct block_device *bdev, fmode_t mode)
1406 struct nbd_device *nbd;
1409 mutex_lock(&nbd_index_mutex);
1410 nbd = bdev->bd_disk->private_data;
1415 if (!refcount_inc_not_zero(&nbd->refs)) {
1419 if (!refcount_inc_not_zero(&nbd->config_refs)) {
1420 struct nbd_config *config;
1422 mutex_lock(&nbd->config_lock);
1423 if (refcount_inc_not_zero(&nbd->config_refs)) {
1424 mutex_unlock(&nbd->config_lock);
1427 config = nbd_alloc_config();
1428 if (IS_ERR(config)) {
1429 ret = PTR_ERR(config);
1430 mutex_unlock(&nbd->config_lock);
1433 nbd->config = config;
1434 refcount_set(&nbd->config_refs, 1);
1435 refcount_inc(&nbd->refs);
1436 mutex_unlock(&nbd->config_lock);
1439 mutex_unlock(&nbd_index_mutex);
1443 static void nbd_release(struct gendisk *disk, fmode_t mode)
1445 struct nbd_device *nbd = disk->private_data;
1446 struct block_device *bdev = bdget_disk(disk, 0);
1448 if (test_bit(NBD_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
1449 bdev->bd_openers == 0)
1450 nbd_disconnect_and_put(nbd);
1453 nbd_config_put(nbd);
1457 static const struct block_device_operations nbd_fops =
1459 .owner = THIS_MODULE,
1461 .release = nbd_release,
1463 .compat_ioctl = nbd_ioctl,
1466 #if IS_ENABLED(CONFIG_DEBUG_FS)
1468 static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
1470 struct nbd_device *nbd = s->private;
1473 seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));
1478 static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
1480 return single_open(file, nbd_dbg_tasks_show, inode->i_private);
1483 static const struct file_operations nbd_dbg_tasks_ops = {
1484 .open = nbd_dbg_tasks_open,
1486 .llseek = seq_lseek,
1487 .release = single_release,
1490 static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
1492 struct nbd_device *nbd = s->private;
1493 u32 flags = nbd->config->flags;
1495 seq_printf(s, "Hex: 0x%08x\n\n", flags);
1497 seq_puts(s, "Known flags:\n");
1499 if (flags & NBD_FLAG_HAS_FLAGS)
1500 seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
1501 if (flags & NBD_FLAG_READ_ONLY)
1502 seq_puts(s, "NBD_FLAG_READ_ONLY\n");
1503 if (flags & NBD_FLAG_SEND_FLUSH)
1504 seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
1505 if (flags & NBD_FLAG_SEND_FUA)
1506 seq_puts(s, "NBD_FLAG_SEND_FUA\n");
1507 if (flags & NBD_FLAG_SEND_TRIM)
1508 seq_puts(s, "NBD_FLAG_SEND_TRIM\n");
1513 static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
1515 return single_open(file, nbd_dbg_flags_show, inode->i_private);
1518 static const struct file_operations nbd_dbg_flags_ops = {
1519 .open = nbd_dbg_flags_open,
1521 .llseek = seq_lseek,
1522 .release = single_release,
1525 static int nbd_dev_dbg_init(struct nbd_device *nbd)
1528 struct nbd_config *config = nbd->config;
1533 dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
1535 dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
1539 config->dbg_dir = dir;
1541 debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
1542 debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
1543 debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
1544 debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
1545 debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
1550 static void nbd_dev_dbg_close(struct nbd_device *nbd)
1552 debugfs_remove_recursive(nbd->config->dbg_dir);
1555 static int nbd_dbg_init(void)
1557 struct dentry *dbg_dir;
1559 dbg_dir = debugfs_create_dir("nbd", NULL);
1560 if (IS_ERR(dbg_dir))
1563 nbd_dbg_dir = dbg_dir;
1568 static void nbd_dbg_close(void)
1570 debugfs_remove_recursive(nbd_dbg_dir);
1573 #else /* IS_ENABLED(CONFIG_DEBUG_FS) */
1575 static int nbd_dev_dbg_init(struct nbd_device *nbd)
1580 static void nbd_dev_dbg_close(struct nbd_device *nbd)
1584 static int nbd_dbg_init(void)
1589 static void nbd_dbg_close(void)
1595 static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
1596 unsigned int hctx_idx, unsigned int numa_node)
1598 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
1599 cmd->nbd = set->driver_data;
1601 mutex_init(&cmd->lock);
1605 static const struct blk_mq_ops nbd_mq_ops = {
1606 .queue_rq = nbd_queue_rq,
1607 .complete = nbd_complete_rq,
1608 .init_request = nbd_init_request,
1609 .timeout = nbd_xmit_timeout,
1612 static int nbd_dev_add(int index)
1614 struct nbd_device *nbd;
1615 struct gendisk *disk;
1616 struct request_queue *q;
1619 nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
1623 disk = alloc_disk(1 << part_shift);
1628 err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
1633 err = idr_alloc(&nbd_index_idr, nbd, 0,
1634 (MINORMASK >> part_shift) + 1, GFP_KERNEL);
1643 nbd->tag_set.ops = &nbd_mq_ops;
1644 nbd->tag_set.nr_hw_queues = 1;
1645 nbd->tag_set.queue_depth = 128;
1646 nbd->tag_set.numa_node = NUMA_NO_NODE;
1647 nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
1648 nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
1649 BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
1650 nbd->tag_set.driver_data = nbd;
1652 err = blk_mq_alloc_tag_set(&nbd->tag_set);
1656 q = blk_mq_init_queue(&nbd->tag_set);
1664 * Tell the block layer that we are not a rotational device
1666 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
1667 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
1668 disk->queue->limits.discard_granularity = 512;
1669 blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
1670 blk_queue_max_segment_size(disk->queue, UINT_MAX);
1671 blk_queue_max_segments(disk->queue, USHRT_MAX);
1672 blk_queue_max_hw_sectors(disk->queue, 65536);
1673 disk->queue->limits.max_sectors = 256;
1675 mutex_init(&nbd->config_lock);
1676 refcount_set(&nbd->config_refs, 0);
1677 refcount_set(&nbd->refs, 1);
1678 INIT_LIST_HEAD(&nbd->list);
1679 disk->major = NBD_MAJOR;
1680 disk->first_minor = index << part_shift;
1681 disk->fops = &nbd_fops;
1682 disk->private_data = nbd;
1683 sprintf(disk->disk_name, "nbd%d", index);
1685 nbd_total_devices++;
1689 blk_mq_free_tag_set(&nbd->tag_set);
1691 idr_remove(&nbd_index_idr, index);
1700 static int find_free_cb(int id, void *ptr, void *data)
1702 struct nbd_device *nbd = ptr;
1703 struct nbd_device **found = data;
1705 if (!refcount_read(&nbd->config_refs)) {
1712 /* Netlink interface. */
1713 static struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
1714 [NBD_ATTR_INDEX] = { .type = NLA_U32 },
1715 [NBD_ATTR_SIZE_BYTES] = { .type = NLA_U64 },
1716 [NBD_ATTR_BLOCK_SIZE_BYTES] = { .type = NLA_U64 },
1717 [NBD_ATTR_TIMEOUT] = { .type = NLA_U64 },
1718 [NBD_ATTR_SERVER_FLAGS] = { .type = NLA_U64 },
1719 [NBD_ATTR_CLIENT_FLAGS] = { .type = NLA_U64 },
1720 [NBD_ATTR_SOCKETS] = { .type = NLA_NESTED},
1721 [NBD_ATTR_DEAD_CONN_TIMEOUT] = { .type = NLA_U64 },
1722 [NBD_ATTR_DEVICE_LIST] = { .type = NLA_NESTED},
1725 static struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
1726 [NBD_SOCK_FD] = { .type = NLA_U32 },
1729 /* We don't use this right now since we don't parse the incoming list, but we
1730 * still want it here so userspace knows what to expect.
1732 static struct nla_policy __attribute__((unused))
1733 nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
1734 [NBD_DEVICE_INDEX] = { .type = NLA_U32 },
1735 [NBD_DEVICE_CONNECTED] = { .type = NLA_U8 },
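/*
 * For orientation, a typical NBD_CMD_CONNECT message parsed by
 * nbd_genl_connect() below nests its attributes like this (a sketch;
 * the values are examples):
 *
 *	NBD_ATTR_INDEX            (u32)  0
 *	NBD_ATTR_SIZE_BYTES       (u64)  1073741824
 *	NBD_ATTR_BLOCK_SIZE_BYTES (u64)  4096
 *	NBD_ATTR_SOCKETS          (nested)
 *	  NBD_SOCK_ITEM           (nested)
 *	    NBD_SOCK_FD           (u32)  connected socket fd
 *
 * More than one NBD_SOCK_ITEM may be supplied when the server
 * advertises NBD_FLAG_CAN_MULTI_CONN.
 */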
1738 static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
1740 struct nbd_device *nbd = NULL;
1741 struct nbd_config *config;
1744 bool put_dev = false;
1746 if (!netlink_capable(skb, CAP_SYS_ADMIN))
1749 if (info->attrs[NBD_ATTR_INDEX])
1750 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
1751 if (!info->attrs[NBD_ATTR_SOCKETS]) {
1752 printk(KERN_ERR "nbd: must specify at least one socket\n");
1755 if (!info->attrs[NBD_ATTR_SIZE_BYTES]) {
1756 printk(KERN_ERR "nbd: must specify a size in bytes for the device\n");
1760 mutex_lock(&nbd_index_mutex);
1762 ret = idr_for_each(&nbd_index_idr, &find_free_cb, &nbd);
1765 new_index = nbd_dev_add(-1);
1766 if (new_index < 0) {
1767 mutex_unlock(&nbd_index_mutex);
1768 printk(KERN_ERR "nbd: failed to add new device\n");
1771 nbd = idr_find(&nbd_index_idr, new_index);
1774 nbd = idr_find(&nbd_index_idr, index);
1776 ret = nbd_dev_add(index);
1778 mutex_unlock(&nbd_index_mutex);
1779 printk(KERN_ERR "nbd: failed to add new device\n");
1782 nbd = idr_find(&nbd_index_idr, index);
1786 printk(KERN_ERR "nbd: couldn't find device at index %d\n",
1788 mutex_unlock(&nbd_index_mutex);
1791 if (!refcount_inc_not_zero(&nbd->refs)) {
1792 mutex_unlock(&nbd_index_mutex);
1795 printk(KERN_ERR "nbd: device at index %d is going down\n",
1799 mutex_unlock(&nbd_index_mutex);
1801 mutex_lock(&nbd->config_lock);
1802 if (refcount_read(&nbd->config_refs)) {
1803 mutex_unlock(&nbd->config_lock);
1807 printk(KERN_ERR "nbd: nbd%d already in use\n", index);
1810 if (WARN_ON(nbd->config)) {
1811 mutex_unlock(&nbd->config_lock);
1815 config = nbd_alloc_config();
1816 if (IS_ERR(config)) {
1817 mutex_unlock(&nbd->config_lock);
1819 printk(KERN_ERR "nbd: couldn't allocate config\n");
1820 return PTR_ERR(config);
1822 nbd->config = config;
1823 refcount_set(&nbd->config_refs, 1);
1824 set_bit(NBD_BOUND, &config->runtime_flags);
1826 if (info->attrs[NBD_ATTR_SIZE_BYTES]) {
1827 u64 bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);
1828 nbd_size_set(nbd, config->blksize,
1829 div64_u64(bytes, config->blksize));
1831 if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) {
1833 nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
1835 bsize = NBD_DEF_BLKSIZE;
1836 if (!nbd_is_valid_blksize(bsize)) {
1840 nbd_size_set(nbd, bsize, div64_u64(config->bytesize, bsize));
1842 if (info->attrs[NBD_ATTR_TIMEOUT]) {
1843 u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]);
1844 nbd->tag_set.timeout = timeout * HZ;
1845 blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
1847 if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
1848 config->dead_conn_timeout =
1849 nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
1850 config->dead_conn_timeout *= HZ;
1852 if (info->attrs[NBD_ATTR_SERVER_FLAGS])
1854 nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
1855 if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
1856 u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
1857 if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
1858 set_bit(NBD_DESTROY_ON_DISCONNECT,
1859 &config->runtime_flags);
1862 if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
1863 set_bit(NBD_DISCONNECT_ON_CLOSE,
1864 &config->runtime_flags);
1868 if (info->attrs[NBD_ATTR_SOCKETS]) {
1869 struct nlattr *attr;
1872 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
1874 struct nlattr *socks[NBD_SOCK_MAX+1];
1876 if (nla_type(attr) != NBD_SOCK_ITEM) {
1877 printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
1881 ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr,
1882 nbd_sock_policy, info->extack);
1884 printk(KERN_ERR "nbd: error processing sock list\n");
1888 if (!socks[NBD_SOCK_FD])
1890 fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
1891 ret = nbd_add_socket(nbd, fd, true);
1896 ret = nbd_start_device(nbd);
1898 mutex_unlock(&nbd->config_lock);
1900 set_bit(NBD_HAS_CONFIG_REF, &config->runtime_flags);
1901 refcount_inc(&nbd->config_refs);
1902 nbd_connect_reply(info, nbd->index);
1904 nbd_config_put(nbd);
1910 static void nbd_disconnect_and_put(struct nbd_device *nbd)
1912 mutex_lock(&nbd->config_lock);
1913 nbd_disconnect(nbd);
1914 mutex_unlock(&nbd->config_lock);
1916 * Make sure the recv thread has finished, so it does not drop the last
1917 * config ref and try to destroy the workqueue from inside the work queue.
1920 flush_workqueue(nbd->recv_workq);
1921 if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
1922 &nbd->config->runtime_flags))
1923 nbd_config_put(nbd);
1926 static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
1928 struct nbd_device *nbd;
1931 if (!netlink_capable(skb, CAP_SYS_ADMIN))
1934 if (!info->attrs[NBD_ATTR_INDEX]) {
1935 printk(KERN_ERR "nbd: must specify an index to disconnect\n");
1938 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
1939 mutex_lock(&nbd_index_mutex);
1940 nbd = idr_find(&nbd_index_idr, index);
1942 mutex_unlock(&nbd_index_mutex);
1943 printk(KERN_ERR "nbd: couldn't find device at index %d\n",
1947 if (!refcount_inc_not_zero(&nbd->refs)) {
1948 mutex_unlock(&nbd_index_mutex);
1949 printk(KERN_ERR "nbd: device at index %d is going down\n",
1953 mutex_unlock(&nbd_index_mutex);
1954 if (!refcount_inc_not_zero(&nbd->config_refs)) {
1958 nbd_disconnect_and_put(nbd);
1959 nbd_config_put(nbd);
1964 static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
1966 struct nbd_device *nbd = NULL;
1967 struct nbd_config *config;
1970 bool put_dev = false;
1972 if (!netlink_capable(skb, CAP_SYS_ADMIN))
1975 if (!info->attrs[NBD_ATTR_INDEX]) {
1976 printk(KERN_ERR "nbd: must specify a device to reconfigure\n");
1979 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
1980 mutex_lock(&nbd_index_mutex);
1981 nbd = idr_find(&nbd_index_idr, index);
1983 mutex_unlock(&nbd_index_mutex);
1984 printk(KERN_ERR "nbd: couldn't find a device at index %d\n",
1988 if (!refcount_inc_not_zero(&nbd->refs)) {
1989 mutex_unlock(&nbd_index_mutex);
1990 printk(KERN_ERR "nbd: device at index %d is going down\n",
1994 mutex_unlock(&nbd_index_mutex);
1996 if (!refcount_inc_not_zero(&nbd->config_refs)) {
1997 dev_err(nbd_to_dev(nbd),
1998 "not configured, cannot reconfigure\n");
2003 mutex_lock(&nbd->config_lock);
2004 config = nbd->config;
2005 if (!test_bit(NBD_BOUND, &config->runtime_flags) ||
2007 dev_err(nbd_to_dev(nbd),
2008 "not configured, cannot reconfigure\n");
2013 if (info->attrs[NBD_ATTR_TIMEOUT]) {
2014 u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]);
2015 nbd->tag_set.timeout = timeout * HZ;
2016 blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
2018 if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
2019 config->dead_conn_timeout =
2020 nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
2021 config->dead_conn_timeout *= HZ;
2023 if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
2024 u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
2025 if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
2026 if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
2027 &config->runtime_flags))
2030 if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
2031 &config->runtime_flags))
2032 refcount_inc(&nbd->refs);
2035 if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
2036 set_bit(NBD_DISCONNECT_ON_CLOSE,
2037 &config->runtime_flags);
2039 clear_bit(NBD_DISCONNECT_ON_CLOSE,
2040 &config->runtime_flags);
2044 if (info->attrs[NBD_ATTR_SOCKETS]) {
2045 struct nlattr *attr;
2048 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
2050 struct nlattr *socks[NBD_SOCK_MAX+1];
2052 if (nla_type(attr) != NBD_SOCK_ITEM) {
2053 printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
2057 ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr,
2058 nbd_sock_policy, info->extack);
2060 printk(KERN_ERR "nbd: error processing sock list\n");
2064 if (!socks[NBD_SOCK_FD])
2066 fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
2067 ret = nbd_reconnect_socket(nbd, fd);
2073 dev_info(nbd_to_dev(nbd), "reconnected socket\n");
2077 mutex_unlock(&nbd->config_lock);
2078 nbd_config_put(nbd);
2085 static const struct genl_ops nbd_connect_genl_ops[] = {
2087 .cmd = NBD_CMD_CONNECT,
2088 .policy = nbd_attr_policy,
2089 .doit = nbd_genl_connect,
2092 .cmd = NBD_CMD_DISCONNECT,
2093 .policy = nbd_attr_policy,
2094 .doit = nbd_genl_disconnect,
2097 .cmd = NBD_CMD_RECONFIGURE,
2098 .policy = nbd_attr_policy,
2099 .doit = nbd_genl_reconfigure,
2102 .cmd = NBD_CMD_STATUS,
2103 .policy = nbd_attr_policy,
2104 .doit = nbd_genl_status,
2108 static const struct genl_multicast_group nbd_mcast_grps[] = {
2109 { .name = NBD_GENL_MCAST_GROUP_NAME, },
2112 static struct genl_family nbd_genl_family __ro_after_init = {
2114 .name = NBD_GENL_FAMILY_NAME,
2115 .version = NBD_GENL_VERSION,
2116 .module = THIS_MODULE,
2117 .ops = nbd_connect_genl_ops,
2118 .n_ops = ARRAY_SIZE(nbd_connect_genl_ops),
2119 .maxattr = NBD_ATTR_MAX,
2120 .mcgrps = nbd_mcast_grps,
2121 .n_mcgrps = ARRAY_SIZE(nbd_mcast_grps),
2124 static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
2126 struct nlattr *dev_opt;
2130 /* This is a little racy, but for status it's ok. The
2131 * reason we don't take a ref here is because we can't
2132 * take a ref in the index == -1 case as we would need
2133 * to put under the nbd_index_mutex, which could
2134 * deadlock if we are configured to remove ourselves
2135 * once we're disconnected.
2137 if (refcount_read(&nbd->config_refs))
2139 dev_opt = nla_nest_start(reply, NBD_DEVICE_ITEM);
2142 ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
2145 ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
2149 nla_nest_end(reply, dev_opt);
2153 static int status_cb(int id, void *ptr, void *data)
2155 struct nbd_device *nbd = ptr;
2156 return populate_nbd_status(nbd, (struct sk_buff *)data);
2159 static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
2161 struct nlattr *dev_list;
2162 struct sk_buff *reply;
2168 if (info->attrs[NBD_ATTR_INDEX])
2169 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2171 mutex_lock(&nbd_index_mutex);
2173 msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
2174 nla_attr_size(sizeof(u8)));
2175 msg_size *= (index == -1) ? nbd_total_devices : 1;
2177 reply = genlmsg_new(msg_size, GFP_KERNEL);
2180 reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
2187 dev_list = nla_nest_start(reply, NBD_ATTR_DEVICE_LIST);
2189 ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
2195 struct nbd_device *nbd;
2196 nbd = idr_find(&nbd_index_idr, index);
2198 ret = populate_nbd_status(nbd, reply);
2205 nla_nest_end(reply, dev_list);
2206 genlmsg_end(reply, reply_head);
2207 genlmsg_reply(reply, info);
2210 mutex_unlock(&nbd_index_mutex);
2214 static void nbd_connect_reply(struct genl_info *info, int index)
2216 struct sk_buff *skb;
2220 skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
2223 msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
2229 ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
2234 genlmsg_end(skb, msg_head);
2235 genlmsg_reply(skb, info);
2238 static void nbd_mcast_index(int index)
2240 struct sk_buff *skb;
2244 skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
2247 msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
2253 ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
2258 genlmsg_end(skb, msg_head);
2259 genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
2262 static void nbd_dead_link_work(struct work_struct *work)
2264 struct link_dead_args *args = container_of(work, struct link_dead_args,
2266 nbd_mcast_index(args->index);
2270 static int __init nbd_init(void)
2274 BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
2277 printk(KERN_ERR "nbd: max_part must be >= 0\n");
2283 part_shift = fls(max_part);
2286 * Adjust max_part according to part_shift as it is exported
2287 * to user space so that the user can know the max number of
2288 * partitions the kernel is able to manage per device.
2290 * Note that -1 is required because partition 0 is reserved
2291 * for the whole disk.
2293 max_part = (1UL << part_shift) - 1;
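/* Worked example: with the default max_part = 16, fls(16) = 5, so
 * part_shift = 5 and max_part becomes 31; each device then owns 32
 * minors (first_minor = index << part_shift in nbd_dev_add(), so
 * nbd0 starts at minor 0, nbd1 at minor 32, and so on).
 */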
2296 if ((1UL << part_shift) > DISK_MAX_PARTS)
2299 if (nbds_max > 1UL << (MINORBITS - part_shift))
2302 if (register_blkdev(NBD_MAJOR, "nbd"))
2305 if (genl_register_family(&nbd_genl_family)) {
2306 unregister_blkdev(NBD_MAJOR, "nbd");
2311 mutex_lock(&nbd_index_mutex);
2312 for (i = 0; i < nbds_max; i++)
2314 mutex_unlock(&nbd_index_mutex);
2318 static int nbd_exit_cb(int id, void *ptr, void *data)
2320 struct list_head *list = (struct list_head *)data;
2321 struct nbd_device *nbd = ptr;
2323 list_add_tail(&nbd->list, list);
2327 static void __exit nbd_cleanup(void)
2329 struct nbd_device *nbd;
2330 LIST_HEAD(del_list);
2333 * Unregister netlink interface prior to waiting
2334 * for the completion of netlink commands.
2336 genl_unregister_family(&nbd_genl_family);
2340 mutex_lock(&nbd_index_mutex);
2341 idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
2342 mutex_unlock(&nbd_index_mutex);
2344 while (!list_empty(&del_list)) {
2345 nbd = list_first_entry(&del_list, struct nbd_device, list);
2346 list_del_init(&nbd->list);
2347 if (refcount_read(&nbd->config_refs))
2348 printk(KERN_ERR "nbd: possibly leaking nbd_config (ref %d)\n",
2349 refcount_read(&nbd->config_refs));
2350 if (refcount_read(&nbd->refs) != 1)
2351 printk(KERN_ERR "nbd: possibly leaking a device\n");
2355 idr_destroy(&nbd_index_idr);
2356 unregister_blkdev(NBD_MAJOR, "nbd");
2359 module_init(nbd_init);
2360 module_exit(nbd_cleanup);
2362 MODULE_DESCRIPTION("Network Block Device");
2363 MODULE_LICENSE("GPL");
2365 module_param(nbds_max, int, 0444);
2366 MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
2367 module_param(max_part, int, 0444);
2368 MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");