1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Network block device - make block devices work over TCP
5 * Note that you cannot swap over this thing, yet. Seems to work but
6 * deadlocks sometimes - you cannot swap over TCP in general.
8 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
9 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
11 * (part of code stolen from loop.c)
14 #include <linux/major.h>
16 #include <linux/blkdev.h>
17 #include <linux/module.h>
18 #include <linux/init.h>
19 #include <linux/sched.h>
20 #include <linux/sched/mm.h>
22 #include <linux/bio.h>
23 #include <linux/stat.h>
24 #include <linux/errno.h>
25 #include <linux/file.h>
26 #include <linux/ioctl.h>
27 #include <linux/mutex.h>
28 #include <linux/compiler.h>
29 #include <linux/completion.h>
30 #include <linux/err.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
34 #include <linux/net.h>
35 #include <linux/kthread.h>
36 #include <linux/types.h>
37 #include <linux/debugfs.h>
38 #include <linux/blk-mq.h>
40 #include <linux/uaccess.h>
41 #include <asm/types.h>
43 #include <linux/nbd.h>
44 #include <linux/nbd-netlink.h>
45 #include <net/genetlink.h>
47 #define CREATE_TRACE_POINTS
48 #include <trace/events/nbd.h>
50 static DEFINE_IDR(nbd_index_idr);
51 static DEFINE_MUTEX(nbd_index_mutex);
52 static int nbd_total_devices = 0;
57 struct request *pending;
64 struct recv_thread_args {
65 struct work_struct work;
66 struct nbd_device *nbd;
70 struct link_dead_args {
71 struct work_struct work;
75 #define NBD_RT_TIMEDOUT 0
76 #define NBD_RT_DISCONNECT_REQUESTED 1
77 #define NBD_RT_DISCONNECTED 2
78 #define NBD_RT_HAS_PID_FILE 3
79 #define NBD_RT_HAS_CONFIG_REF 4
80 #define NBD_RT_BOUND 5
81 #define NBD_RT_DISCONNECT_ON_CLOSE 6
83 #define NBD_DESTROY_ON_DISCONNECT 0
84 #define NBD_DISCONNECT_REQUESTED 1
88 unsigned long runtime_flags;
89 u64 dead_conn_timeout;
91 struct nbd_sock **socks;
93 atomic_t live_connections;
94 wait_queue_head_t conn_wait;
96 atomic_t recv_threads;
97 wait_queue_head_t recv_wq;
100 #if IS_ENABLED(CONFIG_DEBUG_FS)
101 struct dentry *dbg_dir;
106 struct blk_mq_tag_set tag_set;
109 refcount_t config_refs;
111 struct nbd_config *config;
112 struct mutex config_lock;
113 struct gendisk *disk;
114 struct workqueue_struct *recv_workq;
116 struct list_head list;
117 struct task_struct *task_recv;
118 struct task_struct *task_setup;
120 struct completion *destroy_complete;
124 #define NBD_CMD_REQUEUED 1
127 struct nbd_device *nbd;
137 #if IS_ENABLED(CONFIG_DEBUG_FS)
138 static struct dentry *nbd_dbg_dir;
141 #define nbd_name(nbd) ((nbd)->disk->disk_name)
143 #define NBD_MAGIC 0x68797548
145 #define NBD_DEF_BLKSIZE 1024
147 static unsigned int nbds_max = 16;
148 static int max_part = 16;
149 static int part_shift;
151 static int nbd_dev_dbg_init(struct nbd_device *nbd);
152 static void nbd_dev_dbg_close(struct nbd_device *nbd);
153 static void nbd_config_put(struct nbd_device *nbd);
154 static void nbd_connect_reply(struct genl_info *info, int index);
155 static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
156 static void nbd_dead_link_work(struct work_struct *work);
157 static void nbd_disconnect_and_put(struct nbd_device *nbd);
159 static inline struct device *nbd_to_dev(struct nbd_device *nbd)
161 return disk_to_dev(nbd->disk);
164 static void nbd_requeue_cmd(struct nbd_cmd *cmd)
166 struct request *req = blk_mq_rq_from_pdu(cmd);
168 if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
169 blk_mq_requeue_request(req, true);
172 #define NBD_COOKIE_BITS 32
174 static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
176 struct request *req = blk_mq_rq_from_pdu(cmd);
177 u32 tag = blk_mq_unique_tag(req);
178 u64 cookie = cmd->cmd_cookie;
180 return (cookie << NBD_COOKIE_BITS) | tag;
183 static u32 nbd_handle_to_tag(u64 handle)
188 static u32 nbd_handle_to_cookie(u64 handle)
190 return (u32)(handle >> NBD_COOKIE_BITS);
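/*
 * Worked example of the handle encoding (illustrative values): with
 * cmd_cookie = 0x5 and a unique tag of 0x00010003 (hw queue 1, tag 3),
 * nbd_cmd_handle() yields 0x0000000500010003.  The reply path recovers
 * the tag as (u32)handle = 0x00010003 and the cookie as
 * (u32)(handle >> 32) = 0x5, which is how a reply belonging to a stale
 * incarnation of a command can be detected and rejected.
 */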
193 static const char *nbdcmd_to_ascii(int cmd)
196 case NBD_CMD_READ: return "read";
197 case NBD_CMD_WRITE: return "write";
198 case NBD_CMD_DISC: return "disconnect";
199 case NBD_CMD_FLUSH: return "flush";
200 case NBD_CMD_TRIM: return "trim/discard";
205 static ssize_t pid_show(struct device *dev,
206 struct device_attribute *attr, char *buf)
208 struct gendisk *disk = dev_to_disk(dev);
209 struct nbd_device *nbd = (struct nbd_device *)disk->private_data;
211 return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
214 static const struct device_attribute pid_attr = {
215 .attr = { .name = "pid", .mode = 0444},
219 static void nbd_dev_remove(struct nbd_device *nbd)
221 struct gendisk *disk = nbd->disk;
222 struct request_queue *q;
227 blk_cleanup_queue(q);
228 blk_mq_free_tag_set(&nbd->tag_set);
229 disk->private_data = NULL;
234	 * Place this at the end, just before the nbd is freed, to make
235	 * sure that the disk and the related kobject are also totally
236	 * removed to avoid duplicate creation of the same one.
239 if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) && nbd->destroy_complete)
240 complete(nbd->destroy_complete);
245 static void nbd_put(struct nbd_device *nbd)
247 if (refcount_dec_and_mutex_lock(&nbd->refs,
249 idr_remove(&nbd_index_idr, nbd->index);
251 mutex_unlock(&nbd_index_mutex);
255 static int nbd_disconnected(struct nbd_config *config)
257 return test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags) ||
258 test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
261 static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
264 if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
265 struct link_dead_args *args;
266 args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
268 INIT_WORK(&args->work, nbd_dead_link_work);
269 args->index = nbd->index;
270 queue_work(system_wq, &args->work);
274 kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
275 if (atomic_dec_return(&nbd->config->live_connections) == 0) {
276 if (test_and_clear_bit(NBD_RT_DISCONNECT_REQUESTED,
277 &nbd->config->runtime_flags)) {
278 set_bit(NBD_RT_DISCONNECTED,
279 &nbd->config->runtime_flags);
280 dev_info(nbd_to_dev(nbd),
281 "Disconnected due to user request.\n");
286 nsock->pending = NULL;
290 static void nbd_size_clear(struct nbd_device *nbd)
292 if (nbd->config->bytesize) {
293 set_capacity(nbd->disk, 0);
294 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
298 static void nbd_size_update(struct nbd_device *nbd, bool start)
300 struct nbd_config *config = nbd->config;
301 struct block_device *bdev = bdget_disk(nbd->disk, 0);
302 sector_t nr_sectors = config->bytesize >> 9;
304 if (config->flags & NBD_FLAG_SEND_TRIM) {
305 nbd->disk->queue->limits.discard_granularity = config->blksize;
306 nbd->disk->queue->limits.discard_alignment = config->blksize;
307 blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
309 blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
310 blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
311 set_capacity(nbd->disk, nr_sectors);
314 bd_set_nr_sectors(bdev, nr_sectors);
316 set_blocksize(bdev, config->blksize);
318 set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
321 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
324 static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
327 struct nbd_config *config = nbd->config;
328 config->blksize = blocksize;
329 config->bytesize = blocksize * nr_blocks;
330 if (nbd->task_recv != NULL)
331 nbd_size_update(nbd, false);
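/*
 * Illustrative example: NBD_SET_SIZE_BLOCKS with blocksize = 4096 and
 * nr_blocks = 262144 yields bytesize = 1 GiB, which nbd_size_update()
 * turns into 2097152 512-byte sectors (bytesize >> 9) for
 * set_capacity().
 */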
334 static void nbd_complete_rq(struct request *req)
336 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
338 dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req,
339 cmd->status ? "failed" : "done");
341 blk_mq_end_request(req, cmd->status);
345  * Forcibly shut down the socket, causing all listeners to error out
347 static void sock_shutdown(struct nbd_device *nbd)
349 struct nbd_config *config = nbd->config;
352 if (config->num_connections == 0)
354 if (test_and_set_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
357 for (i = 0; i < config->num_connections; i++) {
358 struct nbd_sock *nsock = config->socks[i];
359 mutex_lock(&nsock->tx_lock);
360 nbd_mark_nsock_dead(nbd, nsock, 0);
361 mutex_unlock(&nsock->tx_lock);
363 dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
366 static u32 req_to_nbd_cmd_type(struct request *req)
368 switch (req_op(req)) {
372 return NBD_CMD_FLUSH;
374 return NBD_CMD_WRITE;
382 static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
385 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
386 struct nbd_device *nbd = cmd->nbd;
387 struct nbd_config *config;
389 if (!mutex_trylock(&cmd->lock))
390 return BLK_EH_RESET_TIMER;
392 if (!refcount_inc_not_zero(&nbd->config_refs)) {
393 cmd->status = BLK_STS_TIMEOUT;
394 mutex_unlock(&cmd->lock);
397 config = nbd->config;
399 if (config->num_connections > 1 ||
400 (config->num_connections == 1 && nbd->tag_set.timeout)) {
401 dev_err_ratelimited(nbd_to_dev(nbd),
402 "Connection timed out, retrying (%d/%d alive)\n",
403 atomic_read(&config->live_connections),
404 config->num_connections);
406		 * Hooray, we have more connections: requeue this IO and the submit
407		 * path will put it on a real connection. Or, if only one
408		 * connection is configured, the submit path will wait until
409		 * a new connection is reconfigured or until the dead timeout expires.
412 if (cmd->index < config->num_connections) {
413 struct nbd_sock *nsock =
414 config->socks[cmd->index];
415 mutex_lock(&nsock->tx_lock);
416 /* We can have multiple outstanding requests, so
417 * we don't want to mark the nsock dead if we've
418 * already reconnected with a new socket, so
419			 * only mark it dead if it's the same socket we are using now
422 if (cmd->cookie == nsock->cookie)
423 nbd_mark_nsock_dead(nbd, nsock, 1);
424 mutex_unlock(&nsock->tx_lock);
426 mutex_unlock(&cmd->lock);
427 nbd_requeue_cmd(cmd);
433 if (!nbd->tag_set.timeout) {
435 * Userspace sets timeout=0 to disable socket disconnection,
436 * so just warn and reset the timer.
438 struct nbd_sock *nsock = config->socks[cmd->index];
440 dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n",
441 req, nbdcmd_to_ascii(req_to_nbd_cmd_type(req)),
442 (unsigned long long)blk_rq_pos(req) << 9,
443 blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries);
445 mutex_lock(&nsock->tx_lock);
446 if (cmd->cookie != nsock->cookie) {
447 nbd_requeue_cmd(cmd);
448 mutex_unlock(&nsock->tx_lock);
449 mutex_unlock(&cmd->lock);
453 mutex_unlock(&nsock->tx_lock);
454 mutex_unlock(&cmd->lock);
456 return BLK_EH_RESET_TIMER;
459 dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n");
460 set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags);
461 cmd->status = BLK_STS_IOERR;
462 mutex_unlock(&cmd->lock);
466 blk_mq_complete_request(req);
471 * Send or receive packet.
473 static int sock_xmit(struct nbd_device *nbd, int index, int send,
474 struct iov_iter *iter, int msg_flags, int *sent)
476 struct nbd_config *config = nbd->config;
477 struct socket *sock = config->socks[index]->sock;
480 unsigned int noreclaim_flag;
482 if (unlikely(!sock)) {
483 dev_err_ratelimited(disk_to_dev(nbd->disk),
484 "Attempted %s on closed socket in sock_xmit\n",
485 (send ? "send" : "recv"));
489 msg.msg_iter = *iter;
491 noreclaim_flag = memalloc_noreclaim_save();
493 sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
496 msg.msg_control = NULL;
497 msg.msg_controllen = 0;
498 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
501 result = sock_sendmsg(sock, &msg);
503 result = sock_recvmsg(sock, &msg, msg.msg_flags);
507 result = -EPIPE; /* short read */
512 } while (msg_data_left(&msg));
514 memalloc_noreclaim_restore(noreclaim_flag);
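/*
 * Note: the memalloc_noreclaim_save()/restore() pair above, combined
 * with the GFP_NOIO | __GFP_MEMALLOC socket allocation mode, keeps
 * this transmit path from recursing into memory reclaim: writeback to
 * an nbd device must not block on allocations that can only be
 * satisfied by completing writeback to that same device.
 */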
520 * Different settings for sk->sk_sndtimeo can result in different return values
521 * if there is a signal pending when we enter sendmsg, because reasons?
523 static inline int was_interrupted(int result)
525 return result == -ERESTARTSYS || result == -EINTR;
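/*
 * (The "reasons": sock_intr_errno() reports -ERESTARTSYS when the
 * socket timeout is infinite (MAX_SCHEDULE_TIMEOUT) and -EINTR when a
 * finite sk_sndtimeo is set, so both spellings of "a signal
 * interrupted us" have to be treated identically here.)
 */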
528 /* always call with the tx_lock held */
529 static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
531 struct request *req = blk_mq_rq_from_pdu(cmd);
532 struct nbd_config *config = nbd->config;
533 struct nbd_sock *nsock = config->socks[index];
535 struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
536 struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
537 struct iov_iter from;
538 unsigned long size = blk_rq_bytes(req);
542 u32 nbd_cmd_flags = 0;
543 int sent = nsock->sent, skip = 0;
545 iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
547 type = req_to_nbd_cmd_type(req);
551 if (rq_data_dir(req) == WRITE &&
552 (config->flags & NBD_FLAG_READ_ONLY)) {
553 dev_err_ratelimited(disk_to_dev(nbd->disk),
554 "Write on read-only\n");
558 if (req->cmd_flags & REQ_FUA)
559 nbd_cmd_flags |= NBD_CMD_FLAG_FUA;
561 /* We did a partial send previously, and we at least sent the whole
562	 * request struct, so just go and send the rest of the pages in the request.
566 if (sent >= sizeof(request)) {
567 skip = sent - sizeof(request);
569 /* initialize handle for tracing purposes */
570 handle = nbd_cmd_handle(cmd);
574 iov_iter_advance(&from, sent);
579 cmd->cookie = nsock->cookie;
581 request.type = htonl(type | nbd_cmd_flags);
582 if (type != NBD_CMD_FLUSH) {
583 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
584 request.len = htonl(size);
586 handle = nbd_cmd_handle(cmd);
587 memcpy(request.handle, &handle, sizeof(handle));
589 trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));
591 dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
592 req, nbdcmd_to_ascii(type),
593 (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
594 result = sock_xmit(nbd, index, 1, &from,
595 (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
596 trace_nbd_header_sent(req, handle);
598 if (was_interrupted(result)) {
599		/* If we haven't sent anything we can just return BUSY,
600		 * however if we have sent something we need to make
601		 * sure we only allow this req to be sent until we are completely done.
605 nsock->pending = req;
608 set_bit(NBD_CMD_REQUEUED, &cmd->flags);
609 return BLK_STS_RESOURCE;
611 dev_err_ratelimited(disk_to_dev(nbd->disk),
612 "Send control failed (result %d)\n", result);
616 if (type != NBD_CMD_WRITE)
621 struct bio *next = bio->bi_next;
622 struct bvec_iter iter;
625 bio_for_each_segment(bvec, bio, iter) {
626 bool is_last = !next && bio_iter_last(bvec, iter);
627 int flags = is_last ? 0 : MSG_MORE;
629 dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
631 iov_iter_bvec(&from, WRITE, &bvec, 1, bvec.bv_len);
633 if (skip >= iov_iter_count(&from)) {
634 skip -= iov_iter_count(&from);
637 iov_iter_advance(&from, skip);
640 result = sock_xmit(nbd, index, 1, &from, flags, &sent);
642 if (was_interrupted(result)) {
643 /* We've already sent the header, we
644				 * have no choice but to set pending and return BUSY.
647 nsock->pending = req;
649 set_bit(NBD_CMD_REQUEUED, &cmd->flags);
650 return BLK_STS_RESOURCE;
652 dev_err(disk_to_dev(nbd->disk),
653 "Send data failed (result %d)\n",
658 * The completion might already have come in,
659 * so break for the last one instead of letting
660		 * the iterator do it. This prevents use-after-free of the bio.
669 trace_nbd_payload_sent(req, handle);
670 nsock->pending = NULL;
675 /* NULL returned = something went wrong, inform userspace */
676 static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
678 struct nbd_config *config = nbd->config;
680 struct nbd_reply reply;
682 struct request *req = NULL;
686 struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
691 iov_iter_kvec(&to, READ, &iov, 1, sizeof(reply));
692 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
694 if (!nbd_disconnected(config))
695 dev_err(disk_to_dev(nbd->disk),
696 "Receive control failed (result %d)\n", result);
697 return ERR_PTR(result);
700 if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
701 dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
702 (unsigned long)ntohl(reply.magic));
703 return ERR_PTR(-EPROTO);
706 memcpy(&handle, reply.handle, sizeof(handle));
707 tag = nbd_handle_to_tag(handle);
708 hwq = blk_mq_unique_tag_to_hwq(tag);
709 if (hwq < nbd->tag_set.nr_hw_queues)
710 req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
711 blk_mq_unique_tag_to_tag(tag));
712 if (!req || !blk_mq_request_started(req)) {
713 dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
715 return ERR_PTR(-ENOENT);
717 trace_nbd_header_received(req, handle);
718 cmd = blk_mq_rq_to_pdu(req);
720 mutex_lock(&cmd->lock);
721 if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
722 dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
723 req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
727 if (cmd->status != BLK_STS_OK) {
728 dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
733 if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
734 dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
739 if (ntohl(reply.error)) {
740 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
742 cmd->status = BLK_STS_IOERR;
746 dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
747 if (rq_data_dir(req) != WRITE) {
748 struct req_iterator iter;
751 rq_for_each_segment(bvec, req, iter) {
752 iov_iter_bvec(&to, READ, &bvec, 1, bvec.bv_len);
753 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
755 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
758 * If we've disconnected, we need to make sure we
759 * complete this request, otherwise error out
760 * and let the timeout stuff handle resubmitting
761 * this request onto another connection.
763 if (nbd_disconnected(config)) {
764 cmd->status = BLK_STS_IOERR;
770 dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
775 trace_nbd_payload_received(req, handle);
776 mutex_unlock(&cmd->lock);
777 return ret ? ERR_PTR(ret) : cmd;
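/*
 * For reference, the reply header parsed above is 16 bytes on the wire
 * (big-endian): magic (4 bytes, NBD_REPLY_MAGIC 0x67446698), error
 * (4 bytes) and handle (8 bytes).  For reads it is followed by the
 * payload, which rq_for_each_segment() pulls straight into the
 * request's bvecs.
 */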
780 static void recv_work(struct work_struct *work)
782 struct recv_thread_args *args = container_of(work,
783 struct recv_thread_args,
785 struct nbd_device *nbd = args->nbd;
786 struct nbd_config *config = nbd->config;
791 cmd = nbd_read_stat(nbd, args->index);
793 struct nbd_sock *nsock = config->socks[args->index];
795 mutex_lock(&nsock->tx_lock);
796 nbd_mark_nsock_dead(nbd, nsock, 1);
797 mutex_unlock(&nsock->tx_lock);
801 rq = blk_mq_rq_from_pdu(cmd);
802 if (likely(!blk_should_fake_timeout(rq->q)))
803 blk_mq_complete_request(rq);
806 atomic_dec(&config->recv_threads);
807 wake_up(&config->recv_wq);
811 static bool nbd_clear_req(struct request *req, void *data, bool reserved)
813 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
815 /* don't abort one completed request */
816 if (blk_mq_request_completed(req))
819 mutex_lock(&cmd->lock);
820 cmd->status = BLK_STS_IOERR;
821 mutex_unlock(&cmd->lock);
823 blk_mq_complete_request(req);
827 static void nbd_clear_que(struct nbd_device *nbd)
829 blk_mq_quiesce_queue(nbd->disk->queue);
830 blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
831 blk_mq_unquiesce_queue(nbd->disk->queue);
832 dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
835 static int find_fallback(struct nbd_device *nbd, int index)
837 struct nbd_config *config = nbd->config;
839 struct nbd_sock *nsock = config->socks[index];
840 int fallback = nsock->fallback_index;
842 if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
845 if (config->num_connections <= 1) {
846 dev_err_ratelimited(disk_to_dev(nbd->disk),
847 "Dead connection, failed to find a fallback\n");
851 if (fallback >= 0 && fallback < config->num_connections &&
852 !config->socks[fallback]->dead)
855 if (nsock->fallback_index < 0 ||
856 nsock->fallback_index >= config->num_connections ||
857 config->socks[nsock->fallback_index]->dead) {
859 for (i = 0; i < config->num_connections; i++) {
862 if (!config->socks[i]->dead) {
867 nsock->fallback_index = new_index;
869 dev_err_ratelimited(disk_to_dev(nbd->disk),
870 "Dead connection, failed to find a fallback\n");
874 new_index = nsock->fallback_index;
878 static int wait_for_reconnect(struct nbd_device *nbd)
880 struct nbd_config *config = nbd->config;
881 if (!config->dead_conn_timeout)
884 if (!wait_event_timeout(config->conn_wait,
885 test_bit(NBD_RT_DISCONNECTED,
886 &config->runtime_flags) ||
887 atomic_read(&config->live_connections) > 0,
888 config->dead_conn_timeout))
891 return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
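/*
 * Example: with NBD_ATTR_DEAD_CONN_TIMEOUT set to 30 (scaled by HZ
 * when the config is parsed), an IO that finds every socket dead
 * blocks here for up to 30 seconds waiting for NBD_CMD_RECONFIGURE to
 * supply a fresh socket (which wakes conn_wait) before being failed.
 */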
894 static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
896 struct request *req = blk_mq_rq_from_pdu(cmd);
897 struct nbd_device *nbd = cmd->nbd;
898 struct nbd_config *config;
899 struct nbd_sock *nsock;
902 if (!refcount_inc_not_zero(&nbd->config_refs)) {
903 dev_err_ratelimited(disk_to_dev(nbd->disk),
904 "Socks array is empty\n");
905 blk_mq_start_request(req);
908 config = nbd->config;
910 if (index >= config->num_connections) {
911 dev_err_ratelimited(disk_to_dev(nbd->disk),
912 "Attempted send on invalid socket\n");
914 blk_mq_start_request(req);
917 cmd->status = BLK_STS_OK;
919 nsock = config->socks[index];
920 mutex_lock(&nsock->tx_lock);
922 int old_index = index;
923 index = find_fallback(nbd, index);
924 mutex_unlock(&nsock->tx_lock);
926 if (wait_for_reconnect(nbd)) {
930 /* All the sockets should already be down at this point,
931 * we just want to make sure that DISCONNECTED is set so
932		 * any requests that come in that were queued waiting
933 * for the reconnect timer don't trigger the timer again
934 * and instead just error out.
938 blk_mq_start_request(req);
944 /* Handle the case that we have a pending request that was partially
945 * transmitted that _has_ to be serviced first. We need to call requeue
946	 * here so that it gets put _after_ the request that is already on the queue.
949 blk_mq_start_request(req);
950 if (unlikely(nsock->pending && nsock->pending != req)) {
951 nbd_requeue_cmd(cmd);
956 * Some failures are related to the link going down, so anything that
957 * returns EAGAIN can be retried on a different socket.
959 ret = nbd_send_cmd(nbd, cmd, index);
960 if (ret == -EAGAIN) {
961 dev_err_ratelimited(disk_to_dev(nbd->disk),
962 "Request send failed, requeueing\n");
963 nbd_mark_nsock_dead(nbd, nsock, 1);
964 nbd_requeue_cmd(cmd);
968 mutex_unlock(&nsock->tx_lock);
973 static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
974 const struct blk_mq_queue_data *bd)
976 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
980	 * Since we look at the bios to send the request over the network, we
981 * need to make sure the completion work doesn't mark this request done
982 * before we are done doing our send. This keeps us from dereferencing
983	 * freed data if we have particularly fast completions (i.e. we get the
984 * completion before we exit sock_xmit on the last bvec) or in the case
985 * that the server is misbehaving (or there was an error) before we're
986 * done sending everything over the wire.
988 mutex_lock(&cmd->lock);
989 clear_bit(NBD_CMD_REQUEUED, &cmd->flags);
991 /* We can be called directly from the user space process, which means we
992 * could possibly have signals pending so our sendmsg will fail. In
993	 this case we need to return that we are busy, otherwise error out as appropriate.
996 ret = nbd_handle_cmd(cmd, hctx->queue_num);
1001 mutex_unlock(&cmd->lock);
1006 static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
1009 struct socket *sock;
1012 sock = sockfd_lookup(fd, err);
1016 if (sock->ops->shutdown == sock_no_shutdown) {
1017 dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
1026 static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
1029 struct nbd_config *config = nbd->config;
1030 struct socket *sock;
1031 struct nbd_sock **socks;
1032 struct nbd_sock *nsock;
1035 sock = nbd_get_socket(nbd, arg, &err);
1040 * We need to make sure we don't get any errant requests while we're
1041 * reallocating the ->socks array.
1043 blk_mq_freeze_queue(nbd->disk->queue);
1045 if (!netlink && !nbd->task_setup &&
1046 !test_bit(NBD_RT_BOUND, &config->runtime_flags))
1047 nbd->task_setup = current;
1050 (nbd->task_setup != current ||
1051 test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
1052 dev_err(disk_to_dev(nbd->disk),
1053 "Device being setup by another task");
1058 nsock = kzalloc(sizeof(*nsock), GFP_KERNEL);
1064 socks = krealloc(config->socks, (config->num_connections + 1) *
1065 sizeof(struct nbd_sock *), GFP_KERNEL);
1072 config->socks = socks;
1074 nsock->fallback_index = -1;
1075 nsock->dead = false;
1076 mutex_init(&nsock->tx_lock);
1078 nsock->pending = NULL;
1081 socks[config->num_connections++] = nsock;
1082 atomic_inc(&config->live_connections);
1083 blk_mq_unfreeze_queue(nbd->disk->queue);
1088 blk_mq_unfreeze_queue(nbd->disk->queue);
1093 static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
1095 struct nbd_config *config = nbd->config;
1096 struct socket *sock, *old;
1097 struct recv_thread_args *args;
1101 sock = nbd_get_socket(nbd, arg, &err);
1105 args = kzalloc(sizeof(*args), GFP_KERNEL);
1111 for (i = 0; i < config->num_connections; i++) {
1112 struct nbd_sock *nsock = config->socks[i];
1117 mutex_lock(&nsock->tx_lock);
1119 mutex_unlock(&nsock->tx_lock);
1122 sk_set_memalloc(sock->sk);
1123 if (nbd->tag_set.timeout)
1124 sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
1125 atomic_inc(&config->recv_threads);
1126 refcount_inc(&nbd->config_refs);
1128 nsock->fallback_index = -1;
1130 nsock->dead = false;
1131 INIT_WORK(&args->work, recv_work);
1135 mutex_unlock(&nsock->tx_lock);
1138 clear_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
1140 /* We take the tx_mutex in an error path in the recv_work, so we
1141 * need to queue_work outside of the tx_mutex.
1143 queue_work(nbd->recv_workq, &args->work);
1145 atomic_inc(&config->live_connections);
1146 wake_up(&config->conn_wait);
1154 static void nbd_bdev_reset(struct block_device *bdev)
1156 if (bdev->bd_openers > 1)
1158 bd_set_nr_sectors(bdev, 0);
1161 static void nbd_parse_flags(struct nbd_device *nbd)
1163 struct nbd_config *config = nbd->config;
1164 if (config->flags & NBD_FLAG_READ_ONLY)
1165 set_disk_ro(nbd->disk, true);
1167 set_disk_ro(nbd->disk, false);
1168 if (config->flags & NBD_FLAG_SEND_TRIM)
1169 blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue);
1170 if (config->flags & NBD_FLAG_SEND_FLUSH) {
1171 if (config->flags & NBD_FLAG_SEND_FUA)
1172 blk_queue_write_cache(nbd->disk->queue, true, true);
1174 blk_queue_write_cache(nbd->disk->queue, true, false);
1177 blk_queue_write_cache(nbd->disk->queue, false, false);
1180 static void send_disconnects(struct nbd_device *nbd)
1182 struct nbd_config *config = nbd->config;
1183 struct nbd_request request = {
1184 .magic = htonl(NBD_REQUEST_MAGIC),
1185 .type = htonl(NBD_CMD_DISC),
1187 struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
1188 struct iov_iter from;
1191 for (i = 0; i < config->num_connections; i++) {
1192 struct nbd_sock *nsock = config->socks[i];
1194 iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
1195 mutex_lock(&nsock->tx_lock);
1196 ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
1198 dev_err(disk_to_dev(nbd->disk),
1199 "Send disconnect failed %d\n", ret);
1200 mutex_unlock(&nsock->tx_lock);
1204 static int nbd_disconnect(struct nbd_device *nbd)
1206 struct nbd_config *config = nbd->config;
1208 dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
1209 set_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
1210 set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags);
1211 send_disconnects(nbd);
1215 static void nbd_clear_sock(struct nbd_device *nbd)
1219 nbd->task_setup = NULL;
1222 static void nbd_config_put(struct nbd_device *nbd)
1224 if (refcount_dec_and_mutex_lock(&nbd->config_refs,
1225 &nbd->config_lock)) {
1226 struct nbd_config *config = nbd->config;
1227 nbd_dev_dbg_close(nbd);
1228 nbd_size_clear(nbd);
1229 if (test_and_clear_bit(NBD_RT_HAS_PID_FILE,
1230 &config->runtime_flags))
1231 device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
1232 nbd->task_recv = NULL;
1233 nbd_clear_sock(nbd);
1234 if (config->num_connections) {
1236 for (i = 0; i < config->num_connections; i++) {
1237 sockfd_put(config->socks[i]->sock);
1238 kfree(config->socks[i]);
1240 kfree(config->socks);
1245 if (nbd->recv_workq)
1246 destroy_workqueue(nbd->recv_workq);
1247 nbd->recv_workq = NULL;
1249 nbd->tag_set.timeout = 0;
1250 nbd->disk->queue->limits.discard_granularity = 0;
1251 nbd->disk->queue->limits.discard_alignment = 0;
1252 blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
1253 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);
1255 mutex_unlock(&nbd->config_lock);
1257 module_put(THIS_MODULE);
1261 static int nbd_start_device(struct nbd_device *nbd)
1263 struct nbd_config *config = nbd->config;
1264 int num_connections = config->num_connections;
1271 if (num_connections > 1 &&
1272 !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
1273 dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
1277 nbd->recv_workq = alloc_workqueue("knbd%d-recv",
1278 WQ_MEM_RECLAIM | WQ_HIGHPRI |
1279 WQ_UNBOUND, 0, nbd->index);
1280 if (!nbd->recv_workq) {
1281 dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
1285 blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
1286 nbd->task_recv = current;
1288 nbd_parse_flags(nbd);
1290 error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
1292 dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
1295 set_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags);
1297 nbd_dev_dbg_init(nbd);
1298 for (i = 0; i < num_connections; i++) {
1299 struct recv_thread_args *args;
1301 args = kzalloc(sizeof(*args), GFP_KERNEL);
1305			 * If num_connections is m (m > 2), and the first n
1306			 * (1 < n < m) kzallocs succeed but kzalloc n + 1 fails,
1307			 * we still have n recv threads running. So, add
1308			 * flush_workqueue here to prevent those recv threads
1309			 * from dropping the last config ref and trying to
1310			 * destroy the workqueue from inside the workqueue.
1313 flush_workqueue(nbd->recv_workq);
1316 sk_set_memalloc(config->socks[i]->sock->sk);
1317 if (nbd->tag_set.timeout)
1318 config->socks[i]->sock->sk->sk_sndtimeo =
1319 nbd->tag_set.timeout;
1320 atomic_inc(&config->recv_threads);
1321 refcount_inc(&nbd->config_refs);
1322 INIT_WORK(&args->work, recv_work);
1325 queue_work(nbd->recv_workq, &args->work);
1327 nbd_size_update(nbd, true);
1331 static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
1333 struct nbd_config *config = nbd->config;
1336 ret = nbd_start_device(nbd);
1341 set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
1342 mutex_unlock(&nbd->config_lock);
1343 ret = wait_event_interruptible(config->recv_wq,
1344 atomic_read(&config->recv_threads) == 0);
1350 flush_workqueue(nbd->recv_workq);
1351 mutex_lock(&nbd->config_lock);
1352 nbd_bdev_reset(bdev);
1353 /* user requested, ignore socket errors */
1354 if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags))
1356 if (test_bit(NBD_RT_TIMEDOUT, &config->runtime_flags))
1361 static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
1362 struct block_device *bdev)
1364 nbd_clear_sock(nbd);
1365 __invalidate_device(bdev, true);
1366 nbd_bdev_reset(bdev);
1367 if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
1368 &nbd->config->runtime_flags))
1369 nbd_config_put(nbd);
1372 static bool nbd_is_valid_blksize(unsigned long blksize)
1374 if (!blksize || !is_power_of_2(blksize) || blksize < 512 ||
1375 blksize > PAGE_SIZE)
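/*
 * I.e. the accepted block sizes are exactly the powers of two from 512
 * up to PAGE_SIZE: 512, 1024, 2048 and 4096 on a 4 KiB page system,
 * while e.g. 0, 513 and 8192 are rejected.
 */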
1380 static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
1382 nbd->tag_set.timeout = timeout * HZ;
1384 blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
1386 blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ);
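/*
 * E.g. NBD_SET_TIMEOUT with arg == 0 leaves tag_set.timeout at 0 (no
 * forced disconnect) but still arms the block layer with the 30 second
 * default, so nbd_xmit_timeout() keeps firing to log "Possible stuck
 * request" warnings and reset the timer.
 */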
1389 /* Must be called with config_lock held */
1390 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
1391 unsigned int cmd, unsigned long arg)
1393 struct nbd_config *config = nbd->config;
1396 case NBD_DISCONNECT:
1397 return nbd_disconnect(nbd);
1398 case NBD_CLEAR_SOCK:
1399 nbd_clear_sock_ioctl(nbd, bdev);
1402 return nbd_add_socket(nbd, arg, false);
1403 case NBD_SET_BLKSIZE:
1405 arg = NBD_DEF_BLKSIZE;
1406 if (!nbd_is_valid_blksize(arg))
1408 nbd_size_set(nbd, arg,
1409 div_s64(config->bytesize, arg));
1412 nbd_size_set(nbd, config->blksize,
1413 div_s64(arg, config->blksize));
1415 case NBD_SET_SIZE_BLOCKS:
1416 nbd_size_set(nbd, config->blksize, arg);
1418 case NBD_SET_TIMEOUT:
1419 nbd_set_cmd_timeout(nbd, arg);
1423 config->flags = arg;
1426 return nbd_start_device_ioctl(nbd, bdev);
1429 * This is for compatibility only. The queue is always cleared
1430 * by NBD_DO_IT or NBD_CLEAR_SOCK.
1433 case NBD_PRINT_DEBUG:
1435 * For compatibility only, we no longer keep a list of
1436 * outstanding requests.
1443 static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
1444 unsigned int cmd, unsigned long arg)
1446 struct nbd_device *nbd = bdev->bd_disk->private_data;
1447 struct nbd_config *config = nbd->config;
1448 int error = -EINVAL;
1450 if (!capable(CAP_SYS_ADMIN))
1453 /* The block layer will pass back some non-nbd ioctls in case we have
1454 * special handling for them, but we don't so just return an error.
1456 if (_IOC_TYPE(cmd) != 0xab)
1459 mutex_lock(&nbd->config_lock);
1461 /* Don't allow ioctl operations on a nbd device that was created with
1462 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
1464 if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
1465 (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
1466 error = __nbd_ioctl(bdev, nbd, cmd, arg);
1468 dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
1469 mutex_unlock(&nbd->config_lock);
1473 static struct nbd_config *nbd_alloc_config(void)
1475 struct nbd_config *config;
1477 if (!try_module_get(THIS_MODULE))
1478 return ERR_PTR(-ENODEV);
1480 config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
1482 module_put(THIS_MODULE);
1483 return ERR_PTR(-ENOMEM);
1486 atomic_set(&config->recv_threads, 0);
1487 init_waitqueue_head(&config->recv_wq);
1488 init_waitqueue_head(&config->conn_wait);
1489 config->blksize = NBD_DEF_BLKSIZE;
1490 atomic_set(&config->live_connections, 0);
1494 static int nbd_open(struct block_device *bdev, fmode_t mode)
1496 struct nbd_device *nbd;
1499 mutex_lock(&nbd_index_mutex);
1500 nbd = bdev->bd_disk->private_data;
1505 if (!refcount_inc_not_zero(&nbd->refs)) {
1509 if (!refcount_inc_not_zero(&nbd->config_refs)) {
1510 struct nbd_config *config;
1512 mutex_lock(&nbd->config_lock);
1513 if (refcount_inc_not_zero(&nbd->config_refs)) {
1514 mutex_unlock(&nbd->config_lock);
1517 config = nbd_alloc_config();
1518 if (IS_ERR(config)) {
1519 ret = PTR_ERR(config);
1520 mutex_unlock(&nbd->config_lock);
1523 nbd->config = config;
1524 refcount_set(&nbd->config_refs, 1);
1525 refcount_inc(&nbd->refs);
1526 mutex_unlock(&nbd->config_lock);
1527 set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
1528 } else if (nbd_disconnected(nbd->config)) {
1529 set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
1532 mutex_unlock(&nbd_index_mutex);
1536 static void nbd_release(struct gendisk *disk, fmode_t mode)
1538 struct nbd_device *nbd = disk->private_data;
1539 struct block_device *bdev = bdget_disk(disk, 0);
1541 if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
1542 bdev->bd_openers == 0)
1543 nbd_disconnect_and_put(nbd);
1546 nbd_config_put(nbd);
1550 static const struct block_device_operations nbd_fops =
1552 .owner = THIS_MODULE,
1554 .release = nbd_release,
1556 .compat_ioctl = nbd_ioctl,
1559 #if IS_ENABLED(CONFIG_DEBUG_FS)
1561 static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
1563 struct nbd_device *nbd = s->private;
1566 seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));
1571 static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
1573 return single_open(file, nbd_dbg_tasks_show, inode->i_private);
1576 static const struct file_operations nbd_dbg_tasks_ops = {
1577 .open = nbd_dbg_tasks_open,
1579 .llseek = seq_lseek,
1580 .release = single_release,
1583 static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
1585 struct nbd_device *nbd = s->private;
1586 u32 flags = nbd->config->flags;
1588 seq_printf(s, "Hex: 0x%08x\n\n", flags);
1590 seq_puts(s, "Known flags:\n");
1592 if (flags & NBD_FLAG_HAS_FLAGS)
1593 seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
1594 if (flags & NBD_FLAG_READ_ONLY)
1595 seq_puts(s, "NBD_FLAG_READ_ONLY\n");
1596 if (flags & NBD_FLAG_SEND_FLUSH)
1597 seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
1598 if (flags & NBD_FLAG_SEND_FUA)
1599 seq_puts(s, "NBD_FLAG_SEND_FUA\n");
1600 if (flags & NBD_FLAG_SEND_TRIM)
1601 seq_puts(s, "NBD_FLAG_SEND_TRIM\n");
1606 static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
1608 return single_open(file, nbd_dbg_flags_show, inode->i_private);
1611 static const struct file_operations nbd_dbg_flags_ops = {
1612 .open = nbd_dbg_flags_open,
1614 .llseek = seq_lseek,
1615 .release = single_release,
1618 static int nbd_dev_dbg_init(struct nbd_device *nbd)
1621 struct nbd_config *config = nbd->config;
1626 dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
1628 dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
1632 config->dbg_dir = dir;
1634 debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
1635 debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
1636 debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
1637 debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
1638 debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
1643 static void nbd_dev_dbg_close(struct nbd_device *nbd)
1645 debugfs_remove_recursive(nbd->config->dbg_dir);
1648 static int nbd_dbg_init(void)
1650 struct dentry *dbg_dir;
1652 dbg_dir = debugfs_create_dir("nbd", NULL);
1656 nbd_dbg_dir = dbg_dir;
1661 static void nbd_dbg_close(void)
1663 debugfs_remove_recursive(nbd_dbg_dir);
1666 #else /* IS_ENABLED(CONFIG_DEBUG_FS) */
1668 static int nbd_dev_dbg_init(struct nbd_device *nbd)
1673 static void nbd_dev_dbg_close(struct nbd_device *nbd)
1677 static int nbd_dbg_init(void)
1682 static void nbd_dbg_close(void)
1688 static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
1689 unsigned int hctx_idx, unsigned int numa_node)
1691 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
1692 cmd->nbd = set->driver_data;
1694 mutex_init(&cmd->lock);
1698 static const struct blk_mq_ops nbd_mq_ops = {
1699 .queue_rq = nbd_queue_rq,
1700 .complete = nbd_complete_rq,
1701 .init_request = nbd_init_request,
1702 .timeout = nbd_xmit_timeout,
1705 static int nbd_dev_add(int index)
1707 struct nbd_device *nbd;
1708 struct gendisk *disk;
1709 struct request_queue *q;
1712 nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
1716 disk = alloc_disk(1 << part_shift);
1721 err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
1726 err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
1735 nbd->tag_set.ops = &nbd_mq_ops;
1736 nbd->tag_set.nr_hw_queues = 1;
1737 nbd->tag_set.queue_depth = 128;
1738 nbd->tag_set.numa_node = NUMA_NO_NODE;
1739 nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
1740 nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
1742 nbd->tag_set.driver_data = nbd;
1743 nbd->destroy_complete = NULL;
1745 err = blk_mq_alloc_tag_set(&nbd->tag_set);
1749 q = blk_mq_init_queue(&nbd->tag_set);
1757 * Tell the block layer that we are not a rotational device
1759 blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
1760 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
1761 disk->queue->limits.discard_granularity = 0;
1762 disk->queue->limits.discard_alignment = 0;
1763 blk_queue_max_discard_sectors(disk->queue, 0);
1764 blk_queue_max_segment_size(disk->queue, UINT_MAX);
1765 blk_queue_max_segments(disk->queue, USHRT_MAX);
1766 blk_queue_max_hw_sectors(disk->queue, 65536);
1767 disk->queue->limits.max_sectors = 256;
1769 mutex_init(&nbd->config_lock);
1770 refcount_set(&nbd->config_refs, 0);
1771 refcount_set(&nbd->refs, 1);
1772 INIT_LIST_HEAD(&nbd->list);
1773 disk->major = NBD_MAJOR;
1774 disk->first_minor = index << part_shift;
1775 disk->fops = &nbd_fops;
1776 disk->private_data = nbd;
1777 sprintf(disk->disk_name, "nbd%d", index);
1779 nbd_total_devices++;
1783 blk_mq_free_tag_set(&nbd->tag_set);
1785 idr_remove(&nbd_index_idr, index);
1794 static int find_free_cb(int id, void *ptr, void *data)
1796 struct nbd_device *nbd = ptr;
1797 struct nbd_device **found = data;
1799 if (!refcount_read(&nbd->config_refs)) {
1806 /* Netlink interface. */
1807 static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
1808 [NBD_ATTR_INDEX] = { .type = NLA_U32 },
1809 [NBD_ATTR_SIZE_BYTES] = { .type = NLA_U64 },
1810 [NBD_ATTR_BLOCK_SIZE_BYTES] = { .type = NLA_U64 },
1811 [NBD_ATTR_TIMEOUT] = { .type = NLA_U64 },
1812 [NBD_ATTR_SERVER_FLAGS] = { .type = NLA_U64 },
1813 [NBD_ATTR_CLIENT_FLAGS] = { .type = NLA_U64 },
1814 [NBD_ATTR_SOCKETS] = { .type = NLA_NESTED},
1815 [NBD_ATTR_DEAD_CONN_TIMEOUT] = { .type = NLA_U64 },
1816 [NBD_ATTR_DEVICE_LIST] = { .type = NLA_NESTED},
1819 static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
1820 [NBD_SOCK_FD] = { .type = NLA_U32 },
1823 /* We don't use this right now since we don't parse the incoming list, but we
1824 * still want it here so userspace knows what to expect.
1826 static const struct nla_policy __attribute__((unused))
1827 nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
1828 [NBD_DEVICE_INDEX] = { .type = NLA_U32 },
1829 [NBD_DEVICE_CONNECTED] = { .type = NLA_U8 },
1832 static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
1834 struct nbd_config *config = nbd->config;
1835 u64 bsize = config->blksize;
1836 u64 bytes = config->bytesize;
1838 if (info->attrs[NBD_ATTR_SIZE_BYTES])
1839 bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);
1841 if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) {
1842 bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
1844 bsize = NBD_DEF_BLKSIZE;
1845 if (!nbd_is_valid_blksize(bsize)) {
1846 printk(KERN_ERR "Invalid block size %llu\n", bsize);
1851 if (bytes != config->bytesize || bsize != config->blksize)
1852 nbd_size_set(nbd, bsize, div64_u64(bytes, bsize));
1856 static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
1858 DECLARE_COMPLETION_ONSTACK(destroy_complete);
1859 struct nbd_device *nbd = NULL;
1860 struct nbd_config *config;
1863 bool put_dev = false;
1865 if (!netlink_capable(skb, CAP_SYS_ADMIN))
1868 if (info->attrs[NBD_ATTR_INDEX])
1869 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
1870 if (!info->attrs[NBD_ATTR_SOCKETS]) {
1871 printk(KERN_ERR "nbd: must specify at least one socket\n");
1874 if (!info->attrs[NBD_ATTR_SIZE_BYTES]) {
1875 printk(KERN_ERR "nbd: must specify a size in bytes for the device\n");
1879 mutex_lock(&nbd_index_mutex);
1881 ret = idr_for_each(&nbd_index_idr, &find_free_cb, &nbd);
1884 new_index = nbd_dev_add(-1);
1885 if (new_index < 0) {
1886 mutex_unlock(&nbd_index_mutex);
1887 printk(KERN_ERR "nbd: failed to add new device\n");
1890 nbd = idr_find(&nbd_index_idr, new_index);
1893 nbd = idr_find(&nbd_index_idr, index);
1895 ret = nbd_dev_add(index);
1897 mutex_unlock(&nbd_index_mutex);
1898 printk(KERN_ERR "nbd: failed to add new device\n");
1901 nbd = idr_find(&nbd_index_idr, index);
1905 printk(KERN_ERR "nbd: couldn't find device at index %d\n",
1907 mutex_unlock(&nbd_index_mutex);
1911 if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
1912 test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) {
1913 nbd->destroy_complete = &destroy_complete;
1914 mutex_unlock(&nbd_index_mutex);
1916		/* Wait until the nbd stuff is totally destroyed */
1917 wait_for_completion(&destroy_complete);
1921 if (!refcount_inc_not_zero(&nbd->refs)) {
1922 mutex_unlock(&nbd_index_mutex);
1925 printk(KERN_ERR "nbd: device at index %d is going down\n",
1929 mutex_unlock(&nbd_index_mutex);
1931 mutex_lock(&nbd->config_lock);
1932 if (refcount_read(&nbd->config_refs)) {
1933 mutex_unlock(&nbd->config_lock);
1937 printk(KERN_ERR "nbd: nbd%d already in use\n", index);
1940 if (WARN_ON(nbd->config)) {
1941 mutex_unlock(&nbd->config_lock);
1945 config = nbd_alloc_config();
1946 if (IS_ERR(config)) {
1947 mutex_unlock(&nbd->config_lock);
1949 printk(KERN_ERR "nbd: couldn't allocate config\n");
1950 return PTR_ERR(config);
1952 nbd->config = config;
1953 refcount_set(&nbd->config_refs, 1);
1954 set_bit(NBD_RT_BOUND, &config->runtime_flags);
1956 ret = nbd_genl_size_set(info, nbd);
1960 if (info->attrs[NBD_ATTR_TIMEOUT])
1961 nbd_set_cmd_timeout(nbd,
1962 nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
1963 if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
1964 config->dead_conn_timeout =
1965 nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
1966 config->dead_conn_timeout *= HZ;
1968 if (info->attrs[NBD_ATTR_SERVER_FLAGS])
1970 nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
1971 if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
1972 u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
1973 if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
1975 * We have 1 ref to keep the device around, and then 1
1976 * ref for our current operation here, which will be
1977 * inherited by the config. If we already have
1978 * DESTROY_ON_DISCONNECT set then we know we don't have
1979			 * that extra ref already held so we don't need the put_dev.
1982 if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
1986 if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
1988 refcount_inc(&nbd->refs);
1990 if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
1991 set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
1992 &config->runtime_flags);
1996 if (info->attrs[NBD_ATTR_SOCKETS]) {
1997 struct nlattr *attr;
2000 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
2002 struct nlattr *socks[NBD_SOCK_MAX+1];
2004 if (nla_type(attr) != NBD_SOCK_ITEM) {
2005 printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
2009 ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
2014 printk(KERN_ERR "nbd: error processing sock list\n");
2018 if (!socks[NBD_SOCK_FD])
2020 fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
2021 ret = nbd_add_socket(nbd, fd, true);
2026 ret = nbd_start_device(nbd);
2028 mutex_unlock(&nbd->config_lock);
2030 set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags);
2031 refcount_inc(&nbd->config_refs);
2032 nbd_connect_reply(info, nbd->index);
2034 nbd_config_put(nbd);
2040 static void nbd_disconnect_and_put(struct nbd_device *nbd)
2042 mutex_lock(&nbd->config_lock);
2043 nbd_disconnect(nbd);
2045 wake_up(&nbd->config->conn_wait);
2047 * Make sure recv thread has finished, so it does not drop the last
2048 * config ref and try to destroy the workqueue from inside the work
2049	 * queue. And this also ensures that we can safely call nbd_clear_que()
2050 * to cancel the inflight I/Os.
2052 if (nbd->recv_workq)
2053 flush_workqueue(nbd->recv_workq);
2055 nbd->task_setup = NULL;
2056 mutex_unlock(&nbd->config_lock);
2058 if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
2059 &nbd->config->runtime_flags))
2060 nbd_config_put(nbd);
2063 static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
2065 struct nbd_device *nbd;
2068 if (!netlink_capable(skb, CAP_SYS_ADMIN))
2071 if (!info->attrs[NBD_ATTR_INDEX]) {
2072 printk(KERN_ERR "nbd: must specify an index to disconnect\n");
2075 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2076 mutex_lock(&nbd_index_mutex);
2077 nbd = idr_find(&nbd_index_idr, index);
2079 mutex_unlock(&nbd_index_mutex);
2080 printk(KERN_ERR "nbd: couldn't find device at index %d\n",
2084 if (!refcount_inc_not_zero(&nbd->refs)) {
2085 mutex_unlock(&nbd_index_mutex);
2086 printk(KERN_ERR "nbd: device at index %d is going down\n",
2090 mutex_unlock(&nbd_index_mutex);
2091 if (!refcount_inc_not_zero(&nbd->config_refs)) {
2095 nbd_disconnect_and_put(nbd);
2096 nbd_config_put(nbd);
2101 static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
2103 struct nbd_device *nbd = NULL;
2104 struct nbd_config *config;
2107 bool put_dev = false;
2109 if (!netlink_capable(skb, CAP_SYS_ADMIN))
2112 if (!info->attrs[NBD_ATTR_INDEX]) {
2113 printk(KERN_ERR "nbd: must specify a device to reconfigure\n");
2116 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2117 mutex_lock(&nbd_index_mutex);
2118 nbd = idr_find(&nbd_index_idr, index);
2120 mutex_unlock(&nbd_index_mutex);
2121 printk(KERN_ERR "nbd: couldn't find a device at index %d\n",
2125 if (!refcount_inc_not_zero(&nbd->refs)) {
2126 mutex_unlock(&nbd_index_mutex);
2127 printk(KERN_ERR "nbd: device at index %d is going down\n",
2131 mutex_unlock(&nbd_index_mutex);
2133 if (!refcount_inc_not_zero(&nbd->config_refs)) {
2134 dev_err(nbd_to_dev(nbd),
2135 "not configured, cannot reconfigure\n");
2140 mutex_lock(&nbd->config_lock);
2141 config = nbd->config;
2142 if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
2144 dev_err(nbd_to_dev(nbd),
2145 "not configured, cannot reconfigure\n");
2150 ret = nbd_genl_size_set(info, nbd);
2154 if (info->attrs[NBD_ATTR_TIMEOUT])
2155 nbd_set_cmd_timeout(nbd,
2156 nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
2157 if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
2158 config->dead_conn_timeout =
2159 nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
2160 config->dead_conn_timeout *= HZ;
2162 if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
2163 u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
2164 if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
2165 if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
2169 if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
2171 refcount_inc(&nbd->refs);
2174 if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
2175 set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
2176 &config->runtime_flags);
2178 clear_bit(NBD_RT_DISCONNECT_ON_CLOSE,
2179 &config->runtime_flags);
2183 if (info->attrs[NBD_ATTR_SOCKETS]) {
2184 struct nlattr *attr;
2187 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
2189 struct nlattr *socks[NBD_SOCK_MAX+1];
2191 if (nla_type(attr) != NBD_SOCK_ITEM) {
2192 printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
2196 ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
2201 printk(KERN_ERR "nbd: error processing sock list\n");
2205 if (!socks[NBD_SOCK_FD])
2207 fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
2208 ret = nbd_reconnect_socket(nbd, fd);
2214 dev_info(nbd_to_dev(nbd), "reconnected socket\n");
2218 mutex_unlock(&nbd->config_lock);
2219 nbd_config_put(nbd);
2226 static const struct genl_small_ops nbd_connect_genl_ops[] = {
2228 .cmd = NBD_CMD_CONNECT,
2229 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2230 .doit = nbd_genl_connect,
2233 .cmd = NBD_CMD_DISCONNECT,
2234 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2235 .doit = nbd_genl_disconnect,
2238 .cmd = NBD_CMD_RECONFIGURE,
2239 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2240 .doit = nbd_genl_reconfigure,
2243 .cmd = NBD_CMD_STATUS,
2244 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2245 .doit = nbd_genl_status,
2249 static const struct genl_multicast_group nbd_mcast_grps[] = {
2250 { .name = NBD_GENL_MCAST_GROUP_NAME, },
2253 static struct genl_family nbd_genl_family __ro_after_init = {
2255 .name = NBD_GENL_FAMILY_NAME,
2256 .version = NBD_GENL_VERSION,
2257 .module = THIS_MODULE,
2258 .small_ops = nbd_connect_genl_ops,
2259 .n_small_ops = ARRAY_SIZE(nbd_connect_genl_ops),
2260 .maxattr = NBD_ATTR_MAX,
2261 .policy = nbd_attr_policy,
2262 .mcgrps = nbd_mcast_grps,
2263 .n_mcgrps = ARRAY_SIZE(nbd_mcast_grps),
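/*
 * A hypothetical userspace interaction with this family: resolve the
 * NBD_GENL_FAMILY_NAME family id over generic netlink, then send
 * NBD_CMD_CONNECT carrying NBD_ATTR_INDEX, NBD_ATTR_SIZE_BYTES and an
 * NBD_ATTR_SOCKETS nest of NBD_SOCK_ITEMs, each holding an NBD_SOCK_FD
 * of a TCP socket already connected to the server.
 */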
2266 static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
2268 struct nlattr *dev_opt;
2272	/* This is a little racy, but for status it's ok. The
2273 * reason we don't take a ref here is because we can't
2274 * take a ref in the index == -1 case as we would need
2275 * to put under the nbd_index_mutex, which could
2276 * deadlock if we are configured to remove ourselves
2277 * once we're disconnected.
2279 if (refcount_read(&nbd->config_refs))
2281 dev_opt = nla_nest_start_noflag(reply, NBD_DEVICE_ITEM);
2284 ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
2287 ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
2291 nla_nest_end(reply, dev_opt);
2295 static int status_cb(int id, void *ptr, void *data)
2297 struct nbd_device *nbd = ptr;
2298 return populate_nbd_status(nbd, (struct sk_buff *)data);
2301 static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
2303 struct nlattr *dev_list;
2304 struct sk_buff *reply;
2310 if (info->attrs[NBD_ATTR_INDEX])
2311 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2313 mutex_lock(&nbd_index_mutex);
2315 msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
2316 nla_attr_size(sizeof(u8)));
2317 msg_size *= (index == -1) ? nbd_total_devices : 1;
2319 reply = genlmsg_new(msg_size, GFP_KERNEL);
2322 reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
2329 dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
2331 ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
2337 struct nbd_device *nbd;
2338 nbd = idr_find(&nbd_index_idr, index);
2340 ret = populate_nbd_status(nbd, reply);
2347 nla_nest_end(reply, dev_list);
2348 genlmsg_end(reply, reply_head);
2349 ret = genlmsg_reply(reply, info);
2351 mutex_unlock(&nbd_index_mutex);
2355 static void nbd_connect_reply(struct genl_info *info, int index)
2357 struct sk_buff *skb;
2361 skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
2364 msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
2370 ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
2375 genlmsg_end(skb, msg_head);
2376 genlmsg_reply(skb, info);
2379 static void nbd_mcast_index(int index)
2381 struct sk_buff *skb;
2385 skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
2388 msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
2394 ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
2399 genlmsg_end(skb, msg_head);
2400 genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
2403 static void nbd_dead_link_work(struct work_struct *work)
2405 struct link_dead_args *args = container_of(work, struct link_dead_args,
2407 nbd_mcast_index(args->index);
2411 static int __init nbd_init(void)
2415 BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
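/*
 * The size this asserts is the on-the-wire request header layout (all
 * fields big-endian): magic (4 bytes, NBD_REQUEST_MAGIC 0x25609513),
 * type (4), handle (8), from (8) and len (4); 4 + 4 + 8 + 8 + 4 = 28.
 */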
2418 printk(KERN_ERR "nbd: max_part must be >= 0\n");
2424 part_shift = fls(max_part);
2427		 * Adjust max_part according to part_shift as it is exported
2428		 * to user space so that users can know the max number of
2429		 * partitions the kernel should be able to manage.
2431 * Note that -1 is required because partition 0 is reserved
2432 * for the whole disk.
2434 max_part = (1UL << part_shift) - 1;
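/*
 * Worked example with the defaults: max_part = 16 gives part_shift =
 * fls(16) = 5, so each device claims 1 << 5 = 32 minors and max_part
 * becomes (1 << 5) - 1 = 31 (minor 0 of each block is the whole disk).
 */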
2437 if ((1UL << part_shift) > DISK_MAX_PARTS)
2440 if (nbds_max > 1UL << (MINORBITS - part_shift))
2443 if (register_blkdev(NBD_MAJOR, "nbd"))
2446 if (genl_register_family(&nbd_genl_family)) {
2447 unregister_blkdev(NBD_MAJOR, "nbd");
2452 mutex_lock(&nbd_index_mutex);
2453 for (i = 0; i < nbds_max; i++)
2455 mutex_unlock(&nbd_index_mutex);
2459 static int nbd_exit_cb(int id, void *ptr, void *data)
2461 struct list_head *list = (struct list_head *)data;
2462 struct nbd_device *nbd = ptr;
2464 list_add_tail(&nbd->list, list);
2468 static void __exit nbd_cleanup(void)
2470 struct nbd_device *nbd;
2471 LIST_HEAD(del_list);
2474 * Unregister netlink interface prior to waiting
2475 * for the completion of netlink commands.
2477 genl_unregister_family(&nbd_genl_family);
2481 mutex_lock(&nbd_index_mutex);
2482 idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
2483 mutex_unlock(&nbd_index_mutex);
2485 while (!list_empty(&del_list)) {
2486 nbd = list_first_entry(&del_list, struct nbd_device, list);
2487 list_del_init(&nbd->list);
2488 if (refcount_read(&nbd->config_refs))
2489 printk(KERN_ERR "nbd: possibly leaking nbd_config (ref %d)\n",
2490 refcount_read(&nbd->config_refs));
2491 if (refcount_read(&nbd->refs) != 1)
2492 printk(KERN_ERR "nbd: possibly leaking a device\n");
2496 idr_destroy(&nbd_index_idr);
2497 unregister_blkdev(NBD_MAJOR, "nbd");
2500 module_init(nbd_init);
2501 module_exit(nbd_cleanup);
2503 MODULE_DESCRIPTION("Network Block Device");
2504 MODULE_LICENSE("GPL");
2506 module_param(nbds_max, int, 0444);
2507 MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
2508 module_param(max_part, int, 0444);
2509 MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");
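/*
 * Hypothetical usage of these parameters from a shell, e.g. with the
 * userspace nbd-client tool:
 *
 *   modprobe nbd nbds_max=4 max_part=8
 *   nbd-client some.server 10809 /dev/nbd0
 *
 * creates /dev/nbd0../dev/nbd3, each able to expose partitions
 * (/dev/nbd0p1, ...) once a server is connected.
 */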