/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <asm/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>

#define NBD_TIMEDOUT			0
#define NBD_DISCONNECT_REQUESTED	1

struct nbd_device {
	u32 flags;
	unsigned long runtime_flags;
	struct socket *sock;	/* If == NULL, device is not ready, yet	*/
	int magic;

	struct blk_mq_tag_set tag_set;

	struct mutex tx_lock;
	struct gendisk *disk;
	loff_t blksize;
	loff_t bytesize;

	/* protects initialization and shutdown of the socket */
	spinlock_t sock_lock;
	struct task_struct *task_recv;
	struct task_struct *task_send;

#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

struct nbd_cmd {
	struct nbd_device *nbd;
	struct list_head list;
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;
static int max_part;

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static bool nbd_is_connected(struct nbd_device *nbd)
{
	return !!nbd->task_recv;
}

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}

static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
{
	bd_set_size(bdev, 0);
	set_capacity(nbd->disk, 0);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);

	return 0;
}

static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev)
{
	blk_queue_logical_block_size(nbd->disk->queue, nbd->blksize);
	blk_queue_physical_block_size(nbd->disk->queue, nbd->blksize);
	bd_set_size(bdev, nbd->bytesize);
	set_blocksize(bdev, nbd->blksize);
	set_capacity(nbd->disk, nbd->bytesize >> 9);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}

static void nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
			 loff_t blocksize, loff_t nr_blocks)
{
	nbd->blksize = blocksize;
	nbd->bytesize = blocksize * nr_blocks;
	if (nbd_is_connected(nbd))
		nbd_size_update(nbd, bdev);
}

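/*
 * Illustrative sizing example (numbers hypothetical): after
 * NBD_SET_BLKSIZE with 4096 and NBD_SET_SIZE_BLOCKS with 262144,
 * bytesize = 4096 * 262144 = 1 GiB, and set_capacity() above sees
 * bytesize >> 9 = 2097152 512-byte sectors.
 */
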
static void nbd_end_request(struct nbd_cmd *cmd)
{
	struct nbd_device *nbd = cmd->nbd;
	struct request *req = blk_mq_rq_from_pdu(cmd);
	int error = req->errors ? -EIO : 0;

	dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", cmd,
		error ? "failed" : "done");

	blk_mq_complete_request(req, error);
}

/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	struct socket *sock;

	spin_lock(&nbd->sock_lock);
	if (!nbd->sock) {
		spin_unlock(&nbd->sock_lock);
		return;
	}

	sock = nbd->sock;
	dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
	nbd->sock = NULL;
	spin_unlock(&nbd->sock_lock);

	kernel_sock_shutdown(sock, SHUT_RDWR);
	sockfd_put(sock);
}

static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
						 bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;
	struct socket *sock = NULL;

	spin_lock(&nbd->sock_lock);

	set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);

	if (nbd->sock) {
		sock = nbd->sock;
		get_file(sock->file);
	}

	spin_unlock(&nbd->sock_lock);
	if (sock) {
		kernel_sock_shutdown(sock, SHUT_RDWR);
		sockfd_put(sock);
	}

	req->errors++;
	dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
	return BLK_EH_HANDLED;
}

/*
 *  Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
		     int msg_flags)
{
	struct socket *sock = nbd->sock;
	int result;
	struct msghdr msg;
	struct kvec iov;
	unsigned long pflags = current->flags;

	if (unlikely(!sock)) {
		dev_err(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	current->flags |= PF_MEMALLOC;
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		iov.iov_base = buf;
		iov.iov_len = size;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
		else
			result = kernel_recvmsg(sock, &msg, &iov, 1, size,
						msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		size -= result;
		buf += result;
	} while (size > 0);

	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return result;
}

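/*
 * Note on the PF_MEMALLOC/GFP_NOIO dance above: nbd may sit underneath
 * a filesystem doing writeback, so socket allocations made while
 * pushing a request out must not recurse back into filesystem or
 * block-layer reclaim. sk_allocation is forced to GFP_NOIO and the
 * task is marked PF_MEMALLOC (paired with sk_set_memalloc() on the
 * receive side) so the network stack can dip into reserves instead of
 * deadlocking.
 */
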
static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec,
				 int flags)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, 1, kaddr + bvec->bv_offset,
			   bvec->bv_len, flags);
	kunmap(bvec->bv_page);
	return result;
}

/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	int result;
	struct nbd_request request;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u32 type;

	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
		type = NBD_CMD_DISC;
	else if (req_op(req) == REQ_OP_DISCARD)
		type = NBD_CMD_TRIM;
	else if (req_op(req) == REQ_OP_FLUSH)
		type = NBD_CMD_FLUSH;
	else if (rq_data_dir(req) == WRITE)
		type = NBD_CMD_WRITE;
	else
		type = NBD_CMD_READ;

	memset(&request, 0, sizeof(request));
	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(type);
	if (type != NBD_CMD_FLUSH && type != NBD_CMD_DISC) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	memcpy(request.handle, &req->tag, sizeof(req->tag));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		cmd, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, 1, &request, sizeof(request),
			   (type == NBD_CMD_WRITE) ? MSG_MORE : 0);
	if (result <= 0) {
		dev_err(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EIO;
	}

	if (type != NBD_CMD_WRITE)
		return 0;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				cmd, bvec.bv_len);
			result = sock_send_bvec(nbd, &bvec, flags);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EIO;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
	return 0;
}

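/*
 * On-the-wire request layout produced above (struct nbd_request from
 * <linux/nbd.h>, all fields big-endian, 28 bytes total -- see the
 * BUILD_BUG_ON in nbd_init()):
 *
 *	offset  0: __be32 magic      (NBD_REQUEST_MAGIC)
 *	offset  4: __be32 type       (NBD_CMD_READ/WRITE/DISC/FLUSH/TRIM)
 *	offset  8: char   handle[8]  (the blk-mq tag is stashed here)
 *	offset 16: __be64 from       (byte offset on the export)
 *	offset 24: __be32 len        (payload length in bytes)
 *
 * For NBD_CMD_WRITE the data payload follows immediately; MSG_MORE
 * hints to TCP that more data is coming, so header and payload can
 * share segments.
 */
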
static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, 0, kaddr + bvec->bv_offset, bvec->bv_len,
			   MSG_WAITALL);
	kunmap(bvec->bv_page);
	return result;
}

/* NULL returned = something went wrong, inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd)
{
	int result;
	struct nbd_reply reply;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u16 hwq;
	int tag;

	reply.magic = 0;
	result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL);
	if (result <= 0) {
		dev_err(disk_to_dev(nbd->disk),
			"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
			(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	memcpy(&tag, reply.handle, sizeof(int));

	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	cmd = blk_mq_rq_to_pdu(req);

	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		req->errors++;
		return cmd;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			result = sock_recv_bvec(nbd, &bvec);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				req->errors++;
				return cmd;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				cmd, bvec.bv_len);
		}
	}
	return cmd;
}

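/*
 * The reply parsed above is the fixed 16-byte struct nbd_reply from
 * <linux/nbd.h>: __be32 magic (NBD_REPLY_MAGIC), __be32 error, and the
 * 8-byte handle echoed back by the server, which is decoded into a
 * blk-mq tag to locate the original request. Read payload, if any,
 * follows the reply header on the socket.
 */
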
static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO},
	.show = pid_show,
};

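/*
 * Userspace can tell whether a device has a server attached by reading
 * this attribute, e.g. (illustrative shell usage):
 *
 *	# cat /sys/block/nbd0/pid
 *	1234		<- pid of the task blocked in NBD_DO_IT
 */
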
static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev)
{
	struct nbd_cmd *cmd;
	int ret;

	BUG_ON(nbd->magic != NBD_MAGIC);

	sk_set_memalloc(nbd->sock->sk);

	ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (ret) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
		return ret;
	}

	nbd_size_update(nbd, bdev);

	while (1) {
		cmd = nbd_read_stat(nbd);
		if (IS_ERR(cmd)) {
			ret = PTR_ERR(cmd);
			break;
		}

		nbd_end_request(cmd);
	}

	nbd_size_clear(nbd, bdev);

	device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
	return ret;
}

static void nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd;

	if (!blk_mq_request_started(req))
		return;
	cmd = blk_mq_rq_to_pdu(req);
	req->errors++;
	nbd_end_request(cmd);
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	BUG_ON(nbd->magic != NBD_MAGIC);

	/*
	 * Because we have set nbd->sock to NULL under the tx_lock, all
	 * modifications to the list must have completed by now.
	 */
	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}

static void nbd_handle_cmd(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;

	if (req->cmd_type != REQ_TYPE_FS)
		goto error_out;

	if (rq_data_dir(req) == WRITE &&
	    (nbd->flags & NBD_FLAG_READ_ONLY)) {
		dev_err(disk_to_dev(nbd->disk),
			"Write on read-only\n");
		goto error_out;
	}

	req->errors = 0;

	mutex_lock(&nbd->tx_lock);
	nbd->task_send = current;
	if (unlikely(!nbd->sock)) {
		mutex_unlock(&nbd->tx_lock);
		dev_err(disk_to_dev(nbd->disk),
			"Attempted send on closed socket\n");
		goto error_out;
	}

	if (nbd_send_cmd(nbd, cmd) != 0) {
		dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
		req->errors++;
		nbd_end_request(cmd);
	}

	nbd->task_send = NULL;
	mutex_unlock(&nbd->tx_lock);

	return;

error_out:
	req->errors++;
	nbd_end_request(cmd);
}

static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

	blk_mq_start_request(bd->rq);
	nbd_handle_cmd(cmd);
	return BLK_MQ_RQ_QUEUE_OK;
}

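/*
 * nbd_handle_cmd() sleeps on tx_lock and does blocking socket I/O,
 * which is why the tag_set set up in nbd_init() carries
 * BLK_MQ_F_BLOCKING; without it, .queue_rq could be entered from a
 * context that must not sleep.
 */
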
static int nbd_set_socket(struct nbd_device *nbd, struct socket *sock)
{
	int ret = 0;

	spin_lock_irq(&nbd->sock_lock);

	if (nbd->sock)
		ret = -EBUSY;
	else
		nbd->sock = sock;

	spin_unlock_irq(&nbd->sock_lock);

	return ret;
}

/* Reset all properties of an NBD device */
static void nbd_reset(struct nbd_device *nbd)
{
	nbd->runtime_flags = 0;
	nbd->blksize = 1024;
	nbd->bytesize = 0;
	set_capacity(nbd->disk, 0);
	nbd->flags = 0;
	nbd->tag_set.timeout = 0;
	queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
}

static void nbd_bdev_reset(struct block_device *bdev)
{
	set_device_ro(bdev, false);
	bdev->bd_inode->i_size = 0;
	if (max_part > 0) {
		blkdev_reread_part(bdev);
		bdev->bd_invalidated = 1;
	}
}

static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
{
	if (nbd->flags & NBD_FLAG_READ_ONLY)
		set_device_ro(bdev, true);
	if (nbd->flags & NBD_FLAG_SEND_TRIM)
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (nbd->flags & NBD_FLAG_SEND_FLUSH)
		blk_queue_write_cache(nbd->disk->queue, true, false);
	else
		blk_queue_write_cache(nbd->disk->queue, false, false);
}

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);

/* Must be called with tx_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NBD_DISCONNECT: {
		struct request *sreq;

		dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
		if (!nbd->sock)
			return -EINVAL;

		sreq = blk_mq_alloc_request(bdev_get_queue(bdev), WRITE, 0);
		if (IS_ERR(sreq))
			return -ENOMEM;

		mutex_unlock(&nbd->tx_lock);
		fsync_bdev(bdev);
		mutex_lock(&nbd->tx_lock);
		sreq->cmd_type = REQ_TYPE_DRV_PRIV;

		/* Check again after getting mutex back.  */
		if (!nbd->sock) {
			blk_mq_free_request(sreq);
			return -EINVAL;
		}

		set_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags);

		nbd_send_cmd(nbd, blk_mq_rq_to_pdu(sreq));
		blk_mq_free_request(sreq);
		return 0;
	}

	case NBD_CLEAR_SOCK:
		sock_shutdown(nbd);
		nbd_clear_que(nbd);
		kill_bdev(bdev);
		return 0;

	case NBD_SET_SOCK: {
		int err;
		struct socket *sock = sockfd_lookup(arg, &err);

		if (!sock)
			return err;

		err = nbd_set_socket(nbd, sock);
		if (!err && max_part)
			bdev->bd_invalidated = 1;

		return err;
	}

	case NBD_SET_BLKSIZE: {
		loff_t bsize = div_s64(nbd->bytesize, arg);

		nbd_size_set(nbd, bdev, arg, bsize);
		return 0;
	}

	case NBD_SET_SIZE:
		nbd_size_set(nbd, bdev, nbd->blksize,
			     div_s64(arg, nbd->blksize));
		return 0;

	case NBD_SET_SIZE_BLOCKS:
		nbd_size_set(nbd, bdev, nbd->blksize, arg);
		return 0;

	case NBD_SET_TIMEOUT:
		nbd->tag_set.timeout = arg * HZ;
		blk_queue_rq_timeout(nbd->disk->queue, arg * HZ);
		return 0;

	case NBD_SET_FLAGS:
		nbd->flags = arg;
		return 0;

	case NBD_DO_IT: {
		int error;

		if (nbd->task_recv)
			return -EBUSY;
		if (!nbd->sock)
			return -EINVAL;

		/* We have to claim the device under the lock */
		nbd->task_recv = current;
		mutex_unlock(&nbd->tx_lock);

		nbd_parse_flags(nbd, bdev);

		nbd_dev_dbg_init(nbd);
		error = nbd_thread_recv(nbd, bdev);
		nbd_dev_dbg_close(nbd);

		mutex_lock(&nbd->tx_lock);
		nbd->task_recv = NULL;

		sock_shutdown(nbd);
		nbd_clear_que(nbd);
		kill_bdev(bdev);
		nbd_bdev_reset(bdev);

		/* user requested, ignore socket errors */
		if (test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
			error = 0;
		if (test_bit(NBD_TIMEDOUT, &nbd->runtime_flags))
			error = -ETIMEDOUT;

		nbd_reset(nbd);

		return error;
	}

	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;

	case NBD_PRINT_DEBUG:
		/*
		 * For compatibility only, we no longer keep a list of
		 * outstanding requests.
		 */
		return 0;
	}
	return -ENOTTY;
}

static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	BUG_ON(nbd->magic != NBD_MAGIC);

	mutex_lock(&nbd->tx_lock);
	error = __nbd_ioctl(bdev, nbd, cmd, arg);
	mutex_unlock(&nbd->tx_lock);

	return error;
}

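/*
 * Rough sketch of the userspace side of this ioctl interface
 * (illustrative only: error handling and the NBD network handshake are
 * omitted; nbd-client(8) is the real consumer):
 *
 *	int sk = ...;                       connected TCP socket
 *	int dev = open("/dev/nbd0", O_RDWR);
 *	ioctl(dev, NBD_SET_BLKSIZE, 4096);
 *	ioctl(dev, NBD_SET_SIZE_BLOCKS, nr_blocks);
 *	ioctl(dev, NBD_SET_FLAGS, flags);   as negotiated with the server
 *	ioctl(dev, NBD_SET_SOCK, sk);
 *	err = ioctl(dev, NBD_DO_IT);        blocks until disconnect
 */
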
static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.ioctl =	nbd_ioctl,
	.compat_ioctl =	nbd_ioctl,
};

#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->task_recv)
		seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));
	if (nbd->task_send)
		seq_printf(s, "send: %d\n", task_pid_nr(nbd->task_send));

	return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
	.open = nbd_dbg_tasks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

	return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
	.open = nbd_dbg_flags_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	struct dentry *dir;

	if (!nbd_dbg_dir)
		return -EIO;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (!dir) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
			nbd_name(nbd));
		return -EIO;
	}
	nbd->dbg_dir = dir;

	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
	debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
	debugfs_create_u64("blocksize", 0444, dir, &nbd->blksize);
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->dbg_dir);
}

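/*
 * With debugfs mounted at the usual place, each connected device then
 * exposes (paths illustrative):
 *
 *	/sys/kernel/debug/nbd/nbd0/tasks
 *	/sys/kernel/debug/nbd/nbd0/size_bytes
 *	/sys/kernel/debug/nbd/nbd0/timeout
 *	/sys/kernel/debug/nbd/nbd0/blocksize
 *	/sys/kernel/debug/nbd/nbd0/flags
 */
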
static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (!dbg_dir)
		return -EIO;

	nbd_dbg_dir = dbg_dir;

	return 0;
}

static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}

#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif

static int nbd_init_request(void *data, struct request *rq,
			    unsigned int hctx_idx, unsigned int request_idx,
			    unsigned int numa_node)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->nbd = data;
	INIT_LIST_HEAD(&cmd->list);
	return 0;
}

static struct blk_mq_ops nbd_mq_ops = {
	.queue_rq	= nbd_queue_rq,
	.init_request	= nbd_init_request,
	.timeout	= nbd_xmit_timeout,
};

/*
 * And here should be modules and kernel interface
 *  (Just smiley confuses emacs :-)
 */

static int __init nbd_init(void)
{
	int err = -ENOMEM;
	int i;
	int part_shift;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that user can know the max number of
		 * partitions kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
	if (!nbd_dev)
		return -ENOMEM;

	for (i = 0; i < nbds_max; i++) {
		struct request_queue *q;
		struct gendisk *disk = alloc_disk(1 << part_shift);
		if (!disk)
			goto out;
		nbd_dev[i].disk = disk;

		nbd_dev[i].tag_set.ops = &nbd_mq_ops;
		nbd_dev[i].tag_set.nr_hw_queues = 1;
		nbd_dev[i].tag_set.queue_depth = 128;
		nbd_dev[i].tag_set.numa_node = NUMA_NO_NODE;
		nbd_dev[i].tag_set.cmd_size = sizeof(struct nbd_cmd);
		nbd_dev[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
			BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
		nbd_dev[i].tag_set.driver_data = &nbd_dev[i];

		err = blk_mq_alloc_tag_set(&nbd_dev[i].tag_set);
		if (err) {
			put_disk(disk);
			goto out;
		}

		/*
		 * The new linux 2.5 block layer implementation requires
		 * every gendisk to have its very own request_queue struct.
		 * These structs are big so we dynamically allocate them.
		 */
		q = blk_mq_init_queue(&nbd_dev[i].tag_set);
		if (IS_ERR(q)) {
			blk_mq_free_tag_set(&nbd_dev[i].tag_set);
			put_disk(disk);
			goto out;
		}
		disk->queue = q;

		/*
		 * Tell the block layer that we are not a rotational device
		 */
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
		disk->queue->limits.discard_granularity = 512;
		blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
		disk->queue->limits.discard_zeroes_data = 0;
		blk_queue_max_hw_sectors(disk->queue, 65536);
		disk->queue->limits.max_sectors = 256;
	}

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		err = -EIO;
		goto out;
	}

	printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);

	nbd_dbg_init();

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = NBD_MAGIC;
		spin_lock_init(&nbd_dev[i].sock_lock);
		mutex_init(&nbd_dev[i].tx_lock);
		disk->major = NBD_MAJOR;
		disk->first_minor = i << part_shift;
		disk->fops = &nbd_fops;
		disk->private_data = &nbd_dev[i];
		sprintf(disk->disk_name, "nbd%d", i);
		nbd_reset(&nbd_dev[i]);
		add_disk(disk);
	}

	return 0;
out:
	while (i--) {
		blk_mq_free_tag_set(&nbd_dev[i].tag_set);
		blk_cleanup_queue(nbd_dev[i].disk->queue);
		put_disk(nbd_dev[i].disk);
	}
	kfree(nbd_dev);
	return err;
}

static void __exit nbd_cleanup(void)
{
	int i;

	nbd_dbg_close();

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = 0;
		if (disk) {
			del_gendisk(disk);
			blk_cleanup_queue(disk->queue);
			blk_mq_free_tag_set(&nbd_dev[i].tag_set);
			put_disk(disk);
		}
	}
	unregister_blkdev(NBD_MAJOR, "nbd");
	kfree(nbd_dev);
	printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");
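
/*
 * Example (illustrative): create 4 devices with room for 15 partitions
 * each. Note that max_part is rounded up internally in nbd_init(), so
 * max_part=8 also ends up as 15 ((1 << fls(8)) - 1):
 *
 *	modprobe nbd nbds_max=4 max_part=15
 */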