1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Userspace block device - a block device whose IO is handled in userspace
5 * Make full use of the io_uring passthrough command for communicating with
6 * the ublk userspace daemon (ublksrvd), which handles the basic IO requests.
8 * Copyright 2022 Ming Lei <ming.lei@redhat.com>
10 * (part of code stolen from loop.c)
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <linux/sched.h>
16 #include <linux/pagemap.h>
17 #include <linux/file.h>
18 #include <linux/stat.h>
19 #include <linux/errno.h>
20 #include <linux/major.h>
21 #include <linux/wait.h>
22 #include <linux/blkdev.h>
23 #include <linux/init.h>
24 #include <linux/swap.h>
25 #include <linux/slab.h>
26 #include <linux/compat.h>
27 #include <linux/mutex.h>
28 #include <linux/writeback.h>
29 #include <linux/completion.h>
30 #include <linux/highmem.h>
31 #include <linux/sysfs.h>
32 #include <linux/miscdevice.h>
33 #include <linux/falloc.h>
34 #include <linux/uio.h>
35 #include <linux/ioprio.h>
36 #include <linux/sched/mm.h>
37 #include <linux/uaccess.h>
38 #include <linux/cdev.h>
39 #include <linux/io_uring.h>
40 #include <linux/blk-mq.h>
41 #include <linux/delay.h>
44 #include <linux/task_work.h>
45 #include <uapi/linux/ublk_cmd.h>
47 #define UBLK_MINORS (1U << MINORBITS)
49 /* All UBLK_F_* have to be included into UBLK_F_ALL */
50 #define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY \
51 | UBLK_F_URING_CMD_COMP_IN_TASK \
52 | UBLK_F_NEED_GET_DATA \
53 | UBLK_F_USER_RECOVERY \
54 | UBLK_F_USER_RECOVERY_REISSUE)
56 /* All UBLK_PARAM_TYPE_* should be included here */
57 #define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD)
60 struct llist_node node;
61 struct callback_head work;
64 struct ublk_uring_cmd_pdu {
65 struct ublk_queue *ubq;
69 * io command is active: the sqe cmd has been received, and its cqe isn't done
71 * If the flag is set, the io command is owned by the ublk driver, waiting
72 * for an incoming blk-mq request from the ublk block device.
74 * If the flag is cleared, the io command will be completed and owned by ublksrv.
77 #define UBLK_IO_FLAG_ACTIVE 0x01
80 * IO command is completed via cqe, and it is being handled by ublksrv but not committed yet.
83 * Basically mutually exclusive with UBLK_IO_FLAG_ACTIVE, so it can be used for cross verification.
86 #define UBLK_IO_FLAG_OWNED_BY_SRV 0x02
89 * IO command is aborted, so this flag is set in case of
90 * !UBLK_IO_FLAG_ACTIVE.
92 * After this flag is observed, any pending or new incoming request
93 * associated with this io command will be failed immediately
95 #define UBLK_IO_FLAG_ABORTED 0x04
98 * UBLK_IO_FLAG_NEED_GET_DATA is set because the IO command requires
99 * fetching the data buffer address from ublksrv.
101 * Then, bio data could be copied into this data buffer for a WRITE request
102 * after the IO command is issued again and UBLK_IO_FLAG_NEED_GET_DATA is unset.
104 #define UBLK_IO_FLAG_NEED_GET_DATA 0x08
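/*
 * Per-io flag lifecycle, as a sketch of the code below: the daemon's
 * FETCH_REQ sets UBLK_IO_FLAG_ACTIVE; when a blk-mq request arrives, the
 * driver completes the uring_cmd, clears ACTIVE and sets
 * UBLK_IO_FLAG_OWNED_BY_SRV; the daemon's COMMIT_AND_FETCH_REQ clears
 * OWNED_BY_SRV and re-arms ACTIVE for the next request.
 */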
107 /* userspace buffer address from io cmd */
112 struct io_uring_cmd *cmd;
120 struct task_struct *ubq_daemon;
123 struct llist_head io_cmds;
125 unsigned long io_addr; /* mapped vm address */
126 unsigned int max_io_sz;
128 unsigned short nr_io_ready; /* how many ios setup */
129 struct ublk_device *dev;
130 struct ublk_io ios[];
133 #define UBLK_DAEMON_MONITOR_PERIOD (5 * HZ)
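/*
 * The monitor work runs every UBLK_DAEMON_MONITOR_PERIOD (5 seconds) to
 * detect a dying ubq_daemon and then either quiesce or stop the device,
 * see ublk_daemon_monitor_work().
 */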
136 struct gendisk *ub_disk;
140 unsigned int queue_size;
141 struct ublksrv_ctrl_dev_info dev_info;
143 struct blk_mq_tag_set tag_set;
146 struct device cdev_dev;
148 #define UB_STATE_OPEN 0
149 #define UB_STATE_USED 1
156 struct mm_struct *mm;
158 struct ublk_params params;
160 struct completion completion;
161 unsigned int nr_queues_ready;
162 unsigned int nr_privileged_daemon;
165 * Our ubq->daemon may be killed without any notification, so
166 * monitor each queue's daemon periodically
168 struct delayed_work monitor_work;
169 struct work_struct quiesce_work;
170 struct work_struct stop_work;
173 /* header of ublk_params */
174 struct ublk_params_header {
179 static dev_t ublk_chr_devt;
180 static struct class *ublk_chr_class;
182 static DEFINE_IDR(ublk_index_idr);
183 static DEFINE_SPINLOCK(ublk_idr_lock);
184 static wait_queue_head_t ublk_idr_wq; /* wait until one idr is freed */
186 static DEFINE_MUTEX(ublk_ctl_mutex);
188 static struct miscdevice ublk_misc;
190 static void ublk_dev_param_basic_apply(struct ublk_device *ub)
192 struct request_queue *q = ub->ub_disk->queue;
193 const struct ublk_param_basic *p = &ub->params.basic;
195 blk_queue_logical_block_size(q, 1 << p->logical_bs_shift);
196 blk_queue_physical_block_size(q, 1 << p->physical_bs_shift);
197 blk_queue_io_min(q, 1 << p->io_min_shift);
198 blk_queue_io_opt(q, 1 << p->io_opt_shift);
200 blk_queue_write_cache(q, p->attrs & UBLK_ATTR_VOLATILE_CACHE,
201 p->attrs & UBLK_ATTR_FUA);
202 if (p->attrs & UBLK_ATTR_ROTATIONAL)
203 blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
205 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
207 blk_queue_max_hw_sectors(q, p->max_sectors);
208 blk_queue_chunk_sectors(q, p->chunk_sectors);
209 blk_queue_virt_boundary(q, p->virt_boundary_mask);
211 if (p->attrs & UBLK_ATTR_READ_ONLY)
212 set_disk_ro(ub->ub_disk, true);
214 set_capacity(ub->ub_disk, p->dev_sectors);
217 static void ublk_dev_param_discard_apply(struct ublk_device *ub)
219 struct request_queue *q = ub->ub_disk->queue;
220 const struct ublk_param_discard *p = &ub->params.discard;
222 q->limits.discard_alignment = p->discard_alignment;
223 q->limits.discard_granularity = p->discard_granularity;
224 blk_queue_max_discard_sectors(q, p->max_discard_sectors);
225 blk_queue_max_write_zeroes_sectors(q,
226 p->max_write_zeroes_sectors);
227 blk_queue_max_discard_segments(q, p->max_discard_segments);
230 static int ublk_validate_params(const struct ublk_device *ub)
232 /* basic param is the only one which must be set */
233 if (ub->params.types & UBLK_PARAM_TYPE_BASIC) {
234 const struct ublk_param_basic *p = &ub->params.basic;
236 if (p->logical_bs_shift > PAGE_SHIFT || p->logical_bs_shift < 9)
239 if (p->logical_bs_shift > p->physical_bs_shift)
242 if (p->max_sectors > (ub->dev_info.max_io_buf_bytes >> 9))
247 if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) {
248 const struct ublk_param_discard *p = &ub->params.discard;
250 /* So far, only support single segment discard */
251 if (p->max_discard_sectors && p->max_discard_segments != 1)
254 if (!p->discard_granularity)
261 static int ublk_apply_params(struct ublk_device *ub)
263 if (!(ub->params.types & UBLK_PARAM_TYPE_BASIC))
266 ublk_dev_param_basic_apply(ub);
268 if (ub->params.types & UBLK_PARAM_TYPE_DISCARD)
269 ublk_dev_param_discard_apply(ub);
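/*
 * Two ways exist to run the io command handling in the daemon context:
 * task_work_add() is used when the driver is built in and the device did
 * not request UBLK_F_URING_CMD_COMP_IN_TASK; otherwise the handling goes
 * through io_uring_cmd_complete_in_task(), see ublk_queue_cmd().
 */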
274 static inline bool ublk_can_use_task_work(const struct ublk_queue *ubq)
276 if (IS_BUILTIN(CONFIG_BLK_DEV_UBLK) &&
277 !(ubq->flags & UBLK_F_URING_CMD_COMP_IN_TASK))
282 static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
284 if (ubq->flags & UBLK_F_NEED_GET_DATA)
289 static struct ublk_device *ublk_get_device(struct ublk_device *ub)
291 if (kobject_get_unless_zero(&ub->cdev_dev.kobj))
296 static void ublk_put_device(struct ublk_device *ub)
298 put_device(&ub->cdev_dev);
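/*
 * Queues are laid out back to back in ub->__queues; each slot is
 * ub->queue_size bytes (struct ublk_queue plus q_depth struct ublk_io
 * entries), so a queue is found by a simple offset calculation.
 */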
301 static inline struct ublk_queue *ublk_get_queue(struct ublk_device *dev,
304 return (struct ublk_queue *)&(dev->__queues[qid * dev->queue_size]);
307 static inline bool ublk_rq_has_data(const struct request *rq)
309 return rq->bio && bio_has_data(rq->bio);
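/*
 * Each queue owns a page-aligned array of struct ublksrv_io_desc, one
 * entry per tag; this is the buffer that ublksrv mmaps read-only through
 * /dev/ublkcN, see ublk_ch_mmap().
 */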
312 static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
315 return (struct ublksrv_io_desc *)
316 &(ubq->io_cmd_buf[tag * sizeof(struct ublksrv_io_desc)]);
319 static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
321 return ublk_get_queue(ub, q_id)->io_cmd_buf;
324 static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
326 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
328 return round_up(ubq->q_depth * sizeof(struct ublksrv_io_desc), PAGE_SIZE);
332 static inline bool ublk_queue_can_use_recovery_reissue(
333 struct ublk_queue *ubq)
335 if ((ubq->flags & UBLK_F_USER_RECOVERY) &&
336 (ubq->flags & UBLK_F_USER_RECOVERY_REISSUE))
341 static inline bool ublk_queue_can_use_recovery(
342 struct ublk_queue *ubq)
344 if (ubq->flags & UBLK_F_USER_RECOVERY)
349 static inline bool ublk_can_use_recovery(struct ublk_device *ub)
351 if (ub->dev_info.flags & UBLK_F_USER_RECOVERY)
356 static void ublk_free_disk(struct gendisk *disk)
358 struct ublk_device *ub = disk->private_data;
360 clear_bit(UB_STATE_USED, &ub->state);
361 put_device(&ub->cdev_dev);
364 static const struct block_device_operations ub_fops = {
365 .owner = THIS_MODULE,
366 .free_disk = ublk_free_disk,
369 #define UBLK_MAX_PIN_PAGES 32
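/*
 * The helpers below copy data between the request's bio pages and the
 * userspace buffer provided by ublksrv, pinning at most
 * UBLK_MAX_PIN_PAGES user pages at a time.
 */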
371 struct ublk_map_data {
372 const struct ublk_queue *ubq;
373 const struct request *rq;
374 const struct ublk_io *io;
378 struct ublk_io_iter {
379 struct page *pages[UBLK_MAX_PIN_PAGES];
380 unsigned pg_off; /* offset in the 1st page in pages */
381 int nr_pages; /* how many page pointers in pages */
383 struct bvec_iter iter;
386 static inline unsigned ublk_copy_io_pages(struct ublk_io_iter *data,
387 unsigned max_bytes, bool to_vm)
389 const unsigned total = min_t(unsigned, max_bytes,
390 PAGE_SIZE - data->pg_off +
391 ((data->nr_pages - 1) << PAGE_SHIFT));
395 while (done < total) {
396 struct bio_vec bv = bio_iter_iovec(data->bio, data->iter);
397 const unsigned int bytes = min3(bv.bv_len, total - done,
398 (unsigned)(PAGE_SIZE - data->pg_off));
399 void *bv_buf = bvec_kmap_local(&bv);
400 void *pg_buf = kmap_local_page(data->pages[pg_idx]);
403 memcpy(pg_buf + data->pg_off, bv_buf, bytes);
405 memcpy(bv_buf, pg_buf + data->pg_off, bytes);
407 kunmap_local(pg_buf);
408 kunmap_local(bv_buf);
410 /* advance page array */
411 data->pg_off += bytes;
412 if (data->pg_off == PAGE_SIZE) {
420 bio_advance_iter_single(data->bio, &data->iter, bytes);
421 if (!data->iter.bi_size) {
422 data->bio = data->bio->bi_next;
423 if (data->bio == NULL)
425 data->iter = data->bio->bi_iter;
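/*
 * Pin the user pages behind io->addr with get_user_pages_fast() and copy
 * between them and the request's bio data. to_vm == true copies bio data
 * into the user buffer (WRITE submission in ublk_map_io()); to_vm == false
 * copies the user buffer back into the bio (READ completion in
 * ublk_unmap_io()).
 */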
432 static inline int ublk_copy_user_pages(struct ublk_map_data *data,
435 const unsigned int gup_flags = to_vm ? FOLL_WRITE : 0;
436 const unsigned long start_vm = data->io->addr;
437 unsigned int done = 0;
438 struct ublk_io_iter iter = {
439 .pg_off = start_vm & (PAGE_SIZE - 1),
440 .bio = data->rq->bio,
441 .iter = data->rq->bio->bi_iter,
443 const unsigned int nr_pages = round_up(data->max_bytes +
444 (start_vm & (PAGE_SIZE - 1)), PAGE_SIZE) >> PAGE_SHIFT;
446 while (done < nr_pages) {
447 const unsigned to_pin = min_t(unsigned, UBLK_MAX_PIN_PAGES,
451 iter.nr_pages = get_user_pages_fast(start_vm +
452 (done << PAGE_SHIFT), to_pin, gup_flags,
454 if (iter.nr_pages <= 0)
455 return done == 0 ? iter.nr_pages : done;
456 len = ublk_copy_io_pages(&iter, data->max_bytes, to_vm);
457 for (i = 0; i < iter.nr_pages; i++) {
459 set_page_dirty(iter.pages[i]);
460 put_page(iter.pages[i]);
462 data->max_bytes -= len;
463 done += iter.nr_pages;
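/*
 * Only WRITE requests carry data that must reach ublksrv before the io
 * command is completed, so mapping copies the payload into the user
 * buffer here; READ data flows the other way in ublk_unmap_io() once
 * ublksrv commits the result.
 */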
469 static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
472 const unsigned int rq_bytes = blk_rq_bytes(req);
474 * no zero copy: we delay copying WRITE request data until the ublksrv
475 * context, and the big benefit is that pinning pages in the current
476 * context is pretty fast, see ublk_copy_user_pages()
478 if (req_op(req) != REQ_OP_WRITE && req_op(req) != REQ_OP_FLUSH)
481 if (ublk_rq_has_data(req)) {
482 struct ublk_map_data data = {
486 .max_bytes = rq_bytes,
489 ublk_copy_user_pages(&data, true);
491 return rq_bytes - data.max_bytes;
496 static int ublk_unmap_io(const struct ublk_queue *ubq,
497 const struct request *req,
500 const unsigned int rq_bytes = blk_rq_bytes(req);
502 if (req_op(req) == REQ_OP_READ && ublk_rq_has_data(req)) {
503 struct ublk_map_data data = {
507 .max_bytes = io->res,
510 WARN_ON_ONCE(io->res > rq_bytes);
512 ublk_copy_user_pages(&data, false);
514 return io->res - data.max_bytes;
519 static inline unsigned int ublk_req_build_flags(struct request *req)
523 if (req->cmd_flags & REQ_FAILFAST_DEV)
524 flags |= UBLK_IO_F_FAILFAST_DEV;
526 if (req->cmd_flags & REQ_FAILFAST_TRANSPORT)
527 flags |= UBLK_IO_F_FAILFAST_TRANSPORT;
529 if (req->cmd_flags & REQ_FAILFAST_DRIVER)
530 flags |= UBLK_IO_F_FAILFAST_DRIVER;
532 if (req->cmd_flags & REQ_META)
533 flags |= UBLK_IO_F_META;
535 if (req->cmd_flags & REQ_FUA)
536 flags |= UBLK_IO_F_FUA;
538 if (req->cmd_flags & REQ_NOUNMAP)
539 flags |= UBLK_IO_F_NOUNMAP;
541 if (req->cmd_flags & REQ_SWAP)
542 flags |= UBLK_IO_F_SWAP;
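/*
 * Fill the ublksrv_io_desc slot for this tag so that ublksrv can see the
 * operation, sector range, flags and buffer address of the request.
 */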
547 static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
549 struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
550 struct ublk_io *io = &ubq->ios[req->tag];
553 switch (req_op(req)) {
555 ublk_op = UBLK_IO_OP_READ;
558 ublk_op = UBLK_IO_OP_WRITE;
561 ublk_op = UBLK_IO_OP_FLUSH;
564 ublk_op = UBLK_IO_OP_DISCARD;
566 case REQ_OP_WRITE_ZEROES:
567 ublk_op = UBLK_IO_OP_WRITE_ZEROES;
570 return BLK_STS_IOERR;
573 /* translate to stable UBLK_IO_F_* values since kernel REQ_* flags may change */
574 iod->op_flags = ublk_op | ublk_req_build_flags(req);
575 iod->nr_sectors = blk_rq_sectors(req);
576 iod->start_sector = blk_rq_pos(req);
577 iod->addr = io->addr;
582 static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
583 struct io_uring_cmd *ioucmd)
585 return (struct ublk_uring_cmd_pdu *)&ioucmd->pdu;
588 static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
590 return ubq->ubq_daemon->flags & PF_EXITING;
593 /* todo: handle partial completion */
594 static void ublk_complete_rq(struct request *req)
596 struct ublk_queue *ubq = req->mq_hctx->driver_data;
597 struct ublk_io *io = &ubq->ios[req->tag];
598 unsigned int unmapped_bytes;
600 /* fail the READ IO if nothing has been read */
601 if (!io->res && req_op(req) == REQ_OP_READ)
605 blk_mq_end_request(req, errno_to_blk_status(io->res));
610 * FLUSH or DISCARD usually won't return any data, so end them immediately.
613 * Neither of them needs unmapping.
615 if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE) {
616 blk_mq_end_request(req, BLK_STS_OK);
620 /* for READ request, writing data in iod->addr to rq buffers */
621 unmapped_bytes = ublk_unmap_io(ubq, req, io);
624 * Extremely unlikely, since the data was filled in just before.
626 * Re-read simply for this unlikely case.
628 if (unlikely(unmapped_bytes < io->res))
629 io->res = unmapped_bytes;
631 if (blk_update_request(req, BLK_STS_OK, io->res))
632 blk_mq_requeue_request(req, true);
634 __blk_mq_end_request(req, BLK_STS_OK);
638 * Since __ublk_rq_task_work always fails requests immediately during
639 * exiting, __ublk_fail_req() is only called from the abort context during
640 * exiting, so no lock is needed.
642 * Also, aborting may not have started yet; keep in mind that a failed
643 * request may be issued by the block layer again.
645 static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
648 WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
650 if (!(io->flags & UBLK_IO_FLAG_ABORTED)) {
651 io->flags |= UBLK_IO_FLAG_ABORTED;
652 if (ublk_queue_can_use_recovery_reissue(ubq))
653 blk_mq_requeue_request(req, false);
655 blk_mq_end_request(req, BLK_STS_IOERR);
659 static void ubq_complete_io_cmd(struct ublk_io *io, int res,
660 unsigned issue_flags)
662 /* mark this cmd owned by ublksrv */
663 io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
666 * clear ACTIVE since we are done with this sqe/cmd slot;
667 * we can only accept a new io cmd while the slot is not active.
669 io->flags &= ~UBLK_IO_FLAG_ACTIVE;
671 /* tell ublksrv one io request is coming */
672 io_uring_cmd_done(io->cmd, res, 0, issue_flags);
675 #define UBLK_REQUEUE_DELAY_MS 3
677 static inline void __ublk_abort_rq(struct ublk_queue *ubq,
680 /* We cannot process this rq so just requeue it. */
681 if (ublk_queue_can_use_recovery(ubq))
682 blk_mq_requeue_request(rq, false);
684 blk_mq_end_request(rq, BLK_STS_IOERR);
686 mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0);
689 static inline void __ublk_rq_task_work(struct request *req,
690 unsigned issue_flags)
692 struct ublk_queue *ubq = req->mq_hctx->driver_data;
694 struct ublk_io *io = &ubq->ios[tag];
695 unsigned int mapped_bytes;
697 pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
698 __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
699 ublk_get_iod(ubq, req->tag)->addr);
702 * Task is exiting if either:
704 * (1) current != ubq_daemon.
705 * io_uring_cmd_complete_in_task() tries to run task_work
706 * in a workqueue if ubq_daemon(cmd's task) is PF_EXITING.
708 * (2) current->flags & PF_EXITING.
710 if (unlikely(current != ubq->ubq_daemon || current->flags & PF_EXITING)) {
711 __ublk_abort_rq(ubq, req);
715 if (ublk_need_get_data(ubq) &&
716 (req_op(req) == REQ_OP_WRITE ||
717 req_op(req) == REQ_OP_FLUSH)) {
719 * We have not handled the UBLK_IO_NEED_GET_DATA command yet,
720 * so immediately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
723 if (!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA)) {
724 io->flags |= UBLK_IO_FLAG_NEED_GET_DATA;
725 pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
726 __func__, io->cmd->cmd_op, ubq->q_id,
727 req->tag, io->flags);
728 ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA, issue_flags);
732 * We have handled UBLK_IO_NEED_GET_DATA command,
733 * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just
736 io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
737 /* update iod->addr because ublksrv may have passed a new io buffer */
738 ublk_get_iod(ubq, req->tag)->addr = io->addr;
739 pr_devel("%s: update iod->addr: op %d, qid %d tag %d io_flags %x addr %llx\n",
740 __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
741 ublk_get_iod(ubq, req->tag)->addr);
744 mapped_bytes = ublk_map_io(ubq, req, io);
746 /* partially mapped, update io descriptor */
747 if (unlikely(mapped_bytes != blk_rq_bytes(req))) {
749 * Nothing mapped, retry until we succeed.
751 * We may never succeed in mapping any bytes here because
752 * of OOM. TODO: reserve one buffer with single page pinned
753 * for providing forward progress guarantee.
755 if (unlikely(!mapped_bytes)) {
756 blk_mq_requeue_request(req, false);
757 blk_mq_delay_kick_requeue_list(req->q,
758 UBLK_REQUEUE_DELAY_MS);
762 ublk_get_iod(ubq, req->tag)->nr_sectors =
766 ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
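/*
 * Drain the per-queue llist of queued commands; llist_del_all() returns
 * them newest first, so reverse the list to preserve submission order
 * before handing each request to __ublk_rq_task_work().
 */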
769 static inline void ublk_forward_io_cmds(struct ublk_queue *ubq,
770 unsigned issue_flags)
772 struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
773 struct ublk_rq_data *data, *tmp;
775 io_cmds = llist_reverse_order(io_cmds);
776 llist_for_each_entry_safe(data, tmp, io_cmds, node)
777 __ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags);
780 static inline void ublk_abort_io_cmds(struct ublk_queue *ubq)
782 struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
783 struct ublk_rq_data *data, *tmp;
785 llist_for_each_entry_safe(data, tmp, io_cmds, node)
786 __ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data));
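/*
 * ublk_rq_task_work_cb() runs when the command was forwarded through
 * io_uring_cmd_complete_in_task(), while ublk_rq_task_work_fn() runs when
 * task_work_add() was used; both end up in ublk_forward_io_cmds(),
 * normally in the ubq_daemon context.
 */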
789 static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
791 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
792 struct ublk_queue *ubq = pdu->ubq;
794 ublk_forward_io_cmds(ubq, issue_flags);
797 static void ublk_rq_task_work_fn(struct callback_head *work)
799 struct ublk_rq_data *data = container_of(work,
800 struct ublk_rq_data, work);
801 struct request *req = blk_mq_rq_from_pdu(data);
802 struct ublk_queue *ubq = req->mq_hctx->driver_data;
803 unsigned issue_flags = IO_URING_F_UNLOCKED;
805 ublk_forward_io_cmds(ubq, issue_flags);
808 static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
810 struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
813 if (!llist_add(&data->node, &ubq->io_cmds))
816 io = &ubq->ios[rq->tag];
818 * If the check passes, we know that this is a re-issued request aborted
819 * previously in monitor_work because the ubq_daemon (cmd's task) is
820 * PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore
821 * because this ioucmd's io_uring context may be freed now if no inflight
822 * ioucmd exists. Otherwise we may cause a null-deref in ctx->fallback_work.
824 * Note: monitor_work sets UBLK_IO_FLAG_ABORTED and ends this request (releasing
825 * the tag). Then the request is re-started (allocating the tag) and we get here.
826 * Since releasing/allocating a tag implies smp_mb(), finding UBLK_IO_FLAG_ABORTED
827 * guarantees that this is a re-issued request aborted previously.
829 if (unlikely(io->flags & UBLK_IO_FLAG_ABORTED)) {
830 ublk_abort_io_cmds(ubq);
831 } else if (ublk_can_use_task_work(ubq)) {
832 if (task_work_add(ubq->ubq_daemon, &data->work,
834 ublk_abort_io_cmds(ubq);
836 struct io_uring_cmd *cmd = io->cmd;
837 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
840 io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
844 static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
845 const struct blk_mq_queue_data *bd)
847 struct ublk_queue *ubq = hctx->driver_data;
848 struct request *rq = bd->rq;
851 /* fill iod to slot in io cmd buffer */
852 res = ublk_setup_iod(ubq, rq);
853 if (unlikely(res != BLK_STS_OK))
854 return BLK_STS_IOERR;
856 /* With the recovery feature enabled, force_abort is set in
857 * ublk_stop_dev() before calling del_gendisk(). We have to
858 * abort all requeued and new rqs here to let del_gendisk()
859 * move on. Besides, we cannot call io_uring_cmd_complete_in_task()
860 * here, to avoid a UAF on the io_uring ctx.
862 * Note: force_abort is guaranteed to be seen because it is set
863 * before the request queue is unquiesced.
865 if (ublk_queue_can_use_recovery(ubq) && unlikely(ubq->force_abort))
866 return BLK_STS_IOERR;
868 blk_mq_start_request(bd->rq);
870 if (unlikely(ubq_daemon_is_dying(ubq))) {
871 __ublk_abort_rq(ubq, rq);
875 ublk_queue_cmd(ubq, rq);
880 static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
881 unsigned int hctx_idx)
883 struct ublk_device *ub = driver_data;
884 struct ublk_queue *ubq = ublk_get_queue(ub, hctx->queue_num);
886 hctx->driver_data = ubq;
890 static int ublk_init_rq(struct blk_mq_tag_set *set, struct request *req,
891 unsigned int hctx_idx, unsigned int numa_node)
893 struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
895 init_task_work(&data->work, ublk_rq_task_work_fn);
899 static const struct blk_mq_ops ublk_mq_ops = {
900 .queue_rq = ublk_queue_rq,
901 .init_hctx = ublk_init_hctx,
902 .init_request = ublk_init_rq,
905 static int ublk_ch_open(struct inode *inode, struct file *filp)
907 struct ublk_device *ub = container_of(inode->i_cdev,
908 struct ublk_device, cdev);
910 if (test_and_set_bit(UB_STATE_OPEN, &ub->state))
912 filp->private_data = ub;
916 static int ublk_ch_release(struct inode *inode, struct file *filp)
918 struct ublk_device *ub = filp->private_data;
920 clear_bit(UB_STATE_OPEN, &ub->state);
924 /* map pre-allocated per-queue cmd buffer to ublksrv daemon */
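/*
 * The mmap offset encodes the queue: queue q's descriptor buffer is
 * expected at file offset
 * UBLKSRV_CMD_BUF_OFFSET + q * UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc),
 * with a length of ublk_queue_cmd_buf_size(); the mapping is read-only
 * for the daemon.
 */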
925 static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
927 struct ublk_device *ub = filp->private_data;
928 size_t sz = vma->vm_end - vma->vm_start;
929 unsigned max_sz = UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc);
930 unsigned long pfn, end, phys_off = vma->vm_pgoff << PAGE_SHIFT;
933 spin_lock(&ub->mm_lock);
935 ub->mm = current->mm;
936 if (current->mm != ub->mm)
938 spin_unlock(&ub->mm_lock);
943 if (vma->vm_flags & VM_WRITE)
946 end = UBLKSRV_CMD_BUF_OFFSET + ub->dev_info.nr_hw_queues * max_sz;
947 if (phys_off < UBLKSRV_CMD_BUF_OFFSET || phys_off >= end)
950 q_id = (phys_off - UBLKSRV_CMD_BUF_OFFSET) / max_sz;
951 pr_devel("%s: qid %d, pid %d, addr %lx pg_off %lx sz %lu\n",
952 __func__, q_id, current->pid, vma->vm_start,
953 phys_off, (unsigned long)sz);
955 if (sz != ublk_queue_cmd_buf_size(ub, q_id))
958 pfn = virt_to_phys(ublk_queue_cmd_buf(ub, q_id)) >> PAGE_SHIFT;
959 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
962 static void ublk_commit_completion(struct ublk_device *ub,
963 struct ublksrv_io_cmd *ub_cmd)
965 u32 qid = ub_cmd->q_id, tag = ub_cmd->tag;
966 struct ublk_queue *ubq = ublk_get_queue(ub, qid);
967 struct ublk_io *io = &ubq->ios[tag];
970 /* now this cmd slot is owned by the ublk driver */
971 io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
972 io->res = ub_cmd->result;
974 /* find the io request and complete */
975 req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag);
977 if (req && likely(!blk_should_fake_timeout(req->q)))
978 ublk_complete_rq(req);
982 * When ->ubq_daemon is exiting, either new requests are ended immediately,
983 * or any queued io command is drained, so it is safe to abort the queue locklessly.
986 static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
990 if (!ublk_get_device(ub))
993 for (i = 0; i < ubq->q_depth; i++) {
994 struct ublk_io *io = &ubq->ios[i];
996 if (!(io->flags & UBLK_IO_FLAG_ACTIVE)) {
1000 * Either we fail the request or ublk_rq_task_work_fn will do it.
1003 rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
1005 __ublk_fail_req(ubq, io, rq);
1008 ublk_put_device(ub);
1011 static void ublk_daemon_monitor_work(struct work_struct *work)
1013 struct ublk_device *ub =
1014 container_of(work, struct ublk_device, monitor_work.work);
1017 for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
1018 struct ublk_queue *ubq = ublk_get_queue(ub, i);
1020 if (ubq_daemon_is_dying(ubq)) {
1021 if (ublk_queue_can_use_recovery(ubq))
1022 schedule_work(&ub->quiesce_work);
1024 schedule_work(&ub->stop_work);
1026 /* abort queue is for making forward progress */
1027 ublk_abort_queue(ub, ubq);
1032 * We can't schedule monitor work once ub's state is no longer
1033 * UBLK_S_DEV_LIVE, i.e. after ublk_remove() or __ublk_quiesce_dev() is started.
1035 * No need for ub->mutex: monitor work is canceled after the state is marked
1036 * as not LIVE, so the new state is observed reliably.
1038 if (ub->dev_info.state == UBLK_S_DEV_LIVE)
1039 schedule_delayed_work(&ub->monitor_work,
1040 UBLK_DAEMON_MONITOR_PERIOD);
1043 static inline bool ublk_queue_ready(struct ublk_queue *ubq)
1045 return ubq->nr_io_ready == ubq->q_depth;
1048 static void ublk_cancel_queue(struct ublk_queue *ubq)
1052 if (!ublk_queue_ready(ubq))
1055 for (i = 0; i < ubq->q_depth; i++) {
1056 struct ublk_io *io = &ubq->ios[i];
1058 if (io->flags & UBLK_IO_FLAG_ACTIVE)
1059 io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0,
1060 IO_URING_F_UNLOCKED);
1063 /* all io commands are canceled */
1064 ubq->nr_io_ready = 0;
1067 /* Cancel all pending commands, must be called after del_gendisk() returns */
1068 static void ublk_cancel_dev(struct ublk_device *ub)
1072 for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
1073 ublk_cancel_queue(ublk_get_queue(ub, i));
1076 static bool ublk_check_inflight_rq(struct request *rq, void *data)
1080 if (blk_mq_request_started(rq)) {
1087 static void ublk_wait_tagset_rqs_idle(struct ublk_device *ub)
1091 WARN_ON_ONCE(!blk_queue_quiesced(ub->ub_disk->queue));
1094 blk_mq_tagset_busy_iter(&ub->tag_set,
1095 ublk_check_inflight_rq, &idle);
1098 msleep(UBLK_REQUEUE_DELAY_MS);
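/*
 * Quiesce flow: quiesce the request queue, wait until every started
 * request has been requeued or finished (ublk_wait_tagset_rqs_idle()),
 * mark the device UBLK_S_DEV_QUIESCED, and cancel the pending io
 * commands so the dying daemon's uring_cmds are completed.
 */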
1102 static void __ublk_quiesce_dev(struct ublk_device *ub)
1104 pr_devel("%s: quiesce ub: dev_id %d state %s\n",
1105 __func__, ub->dev_info.dev_id,
1106 ub->dev_info.state == UBLK_S_DEV_LIVE ?
1107 "LIVE" : "QUIESCED");
1108 blk_mq_quiesce_queue(ub->ub_disk->queue);
1109 ublk_wait_tagset_rqs_idle(ub);
1110 ub->dev_info.state = UBLK_S_DEV_QUIESCED;
1111 ublk_cancel_dev(ub);
1112 /* we are going to release the task_struct of ubq_daemon and reset
1113 * ->ubq_daemon to NULL, so checking ubq_daemon in monitor_work would cause a UAF.
1114 * Besides, monitor_work is not necessary in the QUIESCED state since we have
1115 * already scheduled quiesce_work and quiesced all ubqs.
1117 * Do not let monitor_work schedule itself if the state is QUIESCED; we cancel
1118 * it here and re-schedule it in END_USER_RECOVERY to avoid the UAF.
1120 cancel_delayed_work_sync(&ub->monitor_work);
1123 static void ublk_quiesce_work_fn(struct work_struct *work)
1125 struct ublk_device *ub =
1126 container_of(work, struct ublk_device, quiesce_work);
1128 mutex_lock(&ub->mutex);
1129 if (ub->dev_info.state != UBLK_S_DEV_LIVE)
1131 __ublk_quiesce_dev(ub);
1133 mutex_unlock(&ub->mutex);
1136 static void ublk_unquiesce_dev(struct ublk_device *ub)
1140 pr_devel("%s: unquiesce ub: dev_id %d state %s\n",
1141 __func__, ub->dev_info.dev_id,
1142 ub->dev_info.state == UBLK_S_DEV_LIVE ?
1143 "LIVE" : "QUIESCED");
1144 /* quiesce_work has run. We let requeued rqs be aborted
1145 * before running fallback_wq. "force_abort" must be seen
1146 * after the request queue is unquiesced, then del_gendisk() can move on.
1149 for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
1150 ublk_get_queue(ub, i)->force_abort = true;
1152 blk_mq_unquiesce_queue(ub->ub_disk->queue);
1153 /* We may have requeued some rqs in ublk_quiesce_queue() */
1154 blk_mq_kick_requeue_list(ub->ub_disk->queue);
1157 static void ublk_stop_dev(struct ublk_device *ub)
1159 mutex_lock(&ub->mutex);
1160 if (ub->dev_info.state == UBLK_S_DEV_DEAD)
1162 if (ublk_can_use_recovery(ub)) {
1163 if (ub->dev_info.state == UBLK_S_DEV_LIVE)
1164 __ublk_quiesce_dev(ub);
1165 ublk_unquiesce_dev(ub);
1167 del_gendisk(ub->ub_disk);
1168 ub->dev_info.state = UBLK_S_DEV_DEAD;
1169 ub->dev_info.ublksrv_pid = -1;
1170 put_disk(ub->ub_disk);
1173 ublk_cancel_dev(ub);
1174 mutex_unlock(&ub->mutex);
1175 cancel_delayed_work_sync(&ub->monitor_work);
1178 /* device can only be started after all IOs are ready */
1179 static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
1181 mutex_lock(&ub->mutex);
1183 if (ublk_queue_ready(ubq)) {
1184 ubq->ubq_daemon = current;
1185 get_task_struct(ubq->ubq_daemon);
1186 ub->nr_queues_ready++;
1188 if (capable(CAP_SYS_ADMIN))
1189 ub->nr_privileged_daemon++;
1191 if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
1192 complete_all(&ub->completion);
1193 mutex_unlock(&ub->mutex);
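/*
 * Re-queue the request to the daemon context so __ublk_rq_task_work()
 * can copy the WRITE payload into the buffer address that the daemon
 * just supplied with UBLK_IO_NEED_GET_DATA.
 */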
1196 static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
1199 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
1200 struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
1202 ublk_queue_cmd(ubq, req);
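/*
 * Handle the per-io commands issued by ublksrv on /dev/ublkcN:
 * FETCH_REQ arms an io slot before the device starts,
 * COMMIT_AND_FETCH_REQ reports a result and re-arms the slot, and
 * NEED_GET_DATA supplies a buffer for a WRITE request. Commands must use
 * the 128-byte SQE format and, once a queue's daemon is registered, come
 * from that daemon task.
 */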
1205 static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
1206 unsigned int issue_flags,
1207 struct ublksrv_io_cmd *ub_cmd)
1209 struct ublk_device *ub = cmd->file->private_data;
1210 struct ublk_queue *ubq;
1212 u32 cmd_op = cmd->cmd_op;
1213 unsigned tag = ub_cmd->tag;
1215 struct request *req;
1217 pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
1218 __func__, cmd->cmd_op, ub_cmd->q_id, tag,
1221 if (!(issue_flags & IO_URING_F_SQE128))
1224 if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
1227 ubq = ublk_get_queue(ub, ub_cmd->q_id);
1228 if (!ubq || ub_cmd->q_id != ubq->q_id)
1231 if (ubq->ubq_daemon && ubq->ubq_daemon != current)
1234 if (tag >= ubq->q_depth)
1237 io = &ubq->ios[tag];
1239 /* there is pending io cmd, something must be wrong */
1240 if (io->flags & UBLK_IO_FLAG_ACTIVE) {
1246 * ensure that the user issues UBLK_IO_NEED_GET_DATA
1247 * iff the driver has set UBLK_IO_FLAG_NEED_GET_DATA.
1249 if ((!!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA))
1250 ^ (cmd_op == UBLK_IO_NEED_GET_DATA))
1254 case UBLK_IO_FETCH_REQ:
1255 /* UBLK_IO_FETCH_REQ is only allowed before the queue is set up */
1256 if (ublk_queue_ready(ubq)) {
1261 * The io is being handled by server, so COMMIT_RQ is expected
1262 * instead of FETCH_REQ
1264 if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
1266 /* FETCH_REQ has to provide an IO buffer if NEED_GET_DATA is not enabled */
1267 if (!ub_cmd->addr && !ublk_need_get_data(ubq))
1270 io->flags |= UBLK_IO_FLAG_ACTIVE;
1271 io->addr = ub_cmd->addr;
1273 ublk_mark_io_ready(ub, ubq);
1275 case UBLK_IO_COMMIT_AND_FETCH_REQ:
1276 req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
1278 * COMMIT_AND_FETCH_REQ has to provide IO buffer if NEED GET DATA is
1279 * not enabled or it is Read IO.
1281 if (!ub_cmd->addr && (!ublk_need_get_data(ubq) || req_op(req) == REQ_OP_READ))
1283 if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
1285 io->addr = ub_cmd->addr;
1286 io->flags |= UBLK_IO_FLAG_ACTIVE;
1288 ublk_commit_completion(ub, ub_cmd);
1290 case UBLK_IO_NEED_GET_DATA:
1291 if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
1293 io->addr = ub_cmd->addr;
1295 io->flags |= UBLK_IO_FLAG_ACTIVE;
1296 ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag);
1301 return -EIOCBQUEUED;
1304 io_uring_cmd_done(cmd, ret, 0, issue_flags);
1305 pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
1306 __func__, cmd_op, tag, ret, io->flags);
1307 return -EIOCBQUEUED;
1310 static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
1312 struct ublksrv_io_cmd *ub_src = (struct ublksrv_io_cmd *) cmd->cmd;
1313 struct ublksrv_io_cmd ub_cmd;
1316 * Not necessary for async retry, but let's keep it simple and always
1317 * copy the values to avoid any potential reuse.
1319 ub_cmd.q_id = READ_ONCE(ub_src->q_id);
1320 ub_cmd.tag = READ_ONCE(ub_src->tag);
1321 ub_cmd.result = READ_ONCE(ub_src->result);
1322 ub_cmd.addr = READ_ONCE(ub_src->addr);
1324 return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
1327 static const struct file_operations ublk_ch_fops = {
1328 .owner = THIS_MODULE,
1329 .open = ublk_ch_open,
1330 .release = ublk_ch_release,
1331 .llseek = no_llseek,
1332 .uring_cmd = ublk_ch_uring_cmd,
1333 .mmap = ublk_ch_mmap,
1336 static void ublk_deinit_queue(struct ublk_device *ub, int q_id)
1338 int size = ublk_queue_cmd_buf_size(ub, q_id);
1339 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
1341 if (ubq->ubq_daemon)
1342 put_task_struct(ubq->ubq_daemon);
1343 if (ubq->io_cmd_buf)
1344 free_pages((unsigned long)ubq->io_cmd_buf, get_order(size));
1347 static int ublk_init_queue(struct ublk_device *ub, int q_id)
1349 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
1350 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
1354 ubq->flags = ub->dev_info.flags;
1356 ubq->q_depth = ub->dev_info.queue_depth;
1357 size = ublk_queue_cmd_buf_size(ub, q_id);
1359 ptr = (void *) __get_free_pages(gfp_flags, get_order(size));
1363 ubq->io_cmd_buf = ptr;
1368 static void ublk_deinit_queues(struct ublk_device *ub)
1370 int nr_queues = ub->dev_info.nr_hw_queues;
1376 for (i = 0; i < nr_queues; i++)
1377 ublk_deinit_queue(ub, i);
1378 kfree(ub->__queues);
1381 static int ublk_init_queues(struct ublk_device *ub)
1383 int nr_queues = ub->dev_info.nr_hw_queues;
1384 int depth = ub->dev_info.queue_depth;
1385 int ubq_size = sizeof(struct ublk_queue) + depth * sizeof(struct ublk_io);
1386 int i, ret = -ENOMEM;
1388 ub->queue_size = ubq_size;
1389 ub->__queues = kcalloc(nr_queues, ubq_size, GFP_KERNEL);
1393 for (i = 0; i < nr_queues; i++) {
1394 if (ublk_init_queue(ub, i))
1398 init_completion(&ub->completion);
1402 ublk_deinit_queues(ub);
1406 static int ublk_alloc_dev_number(struct ublk_device *ub, int idx)
1411 spin_lock(&ublk_idr_lock);
1412 /* allocate id; if @idx >= 0, we're requesting that specific id */
1414 err = idr_alloc(&ublk_index_idr, ub, i, i + 1, GFP_NOWAIT);
1418 err = idr_alloc(&ublk_index_idr, ub, 0, 0, GFP_NOWAIT);
1420 spin_unlock(&ublk_idr_lock);
1423 ub->ub_number = err;
1428 static void ublk_free_dev_number(struct ublk_device *ub)
1430 spin_lock(&ublk_idr_lock);
1431 idr_remove(&ublk_index_idr, ub->ub_number);
1432 wake_up_all(&ublk_idr_wq);
1433 spin_unlock(&ublk_idr_lock);
1436 static void ublk_cdev_rel(struct device *dev)
1438 struct ublk_device *ub = container_of(dev, struct ublk_device, cdev_dev);
1440 blk_mq_free_tag_set(&ub->tag_set);
1441 ublk_deinit_queues(ub);
1442 ublk_free_dev_number(ub);
1443 mutex_destroy(&ub->mutex);
1447 static int ublk_add_chdev(struct ublk_device *ub)
1449 struct device *dev = &ub->cdev_dev;
1450 int minor = ub->ub_number;
1453 dev->parent = ublk_misc.this_device;
1454 dev->devt = MKDEV(MAJOR(ublk_chr_devt), minor);
1455 dev->class = ublk_chr_class;
1456 dev->release = ublk_cdev_rel;
1457 device_initialize(dev);
1459 ret = dev_set_name(dev, "ublkc%d", minor);
1463 cdev_init(&ub->cdev, &ublk_ch_fops);
1464 ret = cdev_device_add(&ub->cdev, dev);
1473 static void ublk_stop_work_fn(struct work_struct *work)
1475 struct ublk_device *ub =
1476 container_of(work, struct ublk_device, stop_work);
1481 /* align max io buffer size with PAGE_SIZE */
1482 static void ublk_align_max_io_size(struct ublk_device *ub)
1484 unsigned int max_io_bytes = ub->dev_info.max_io_buf_bytes;
1486 ub->dev_info.max_io_buf_bytes =
1487 round_down(max_io_bytes, PAGE_SIZE);
1490 static int ublk_add_tag_set(struct ublk_device *ub)
1492 ub->tag_set.ops = &ublk_mq_ops;
1493 ub->tag_set.nr_hw_queues = ub->dev_info.nr_hw_queues;
1494 ub->tag_set.queue_depth = ub->dev_info.queue_depth;
1495 ub->tag_set.numa_node = NUMA_NO_NODE;
1496 ub->tag_set.cmd_size = sizeof(struct ublk_rq_data);
1497 ub->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
1498 ub->tag_set.driver_data = ub;
1499 return blk_mq_alloc_tag_set(&ub->tag_set);
1502 static void ublk_remove(struct ublk_device *ub)
1505 cancel_work_sync(&ub->stop_work);
1506 cancel_work_sync(&ub->quiesce_work);
1507 cdev_device_del(&ub->cdev, &ub->cdev_dev);
1508 put_device(&ub->cdev_dev);
1511 static struct ublk_device *ublk_get_device_from_id(int idx)
1513 struct ublk_device *ub = NULL;
1518 spin_lock(&ublk_idr_lock);
1519 ub = idr_find(&ublk_index_idr, idx);
1521 ub = ublk_get_device(ub);
1522 spin_unlock(&ublk_idr_lock);
1527 static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd)
1529 struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
1530 int ublksrv_pid = (int)header->data[0];
1531 struct ublk_device *ub;
1532 struct gendisk *disk;
1535 if (ublksrv_pid <= 0)
1538 ub = ublk_get_device_from_id(header->dev_id);
1542 wait_for_completion_interruptible(&ub->completion);
1544 schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
1546 mutex_lock(&ub->mutex);
1547 if (ub->dev_info.state == UBLK_S_DEV_LIVE ||
1548 test_bit(UB_STATE_USED, &ub->state)) {
1553 disk = blk_mq_alloc_disk(&ub->tag_set, ub);
1555 ret = PTR_ERR(disk);
1558 sprintf(disk->disk_name, "ublkb%d", ub->ub_number);
1559 disk->fops = &ub_fops;
1560 disk->private_data = ub;
1562 ub->dev_info.ublksrv_pid = ublksrv_pid;
1565 ret = ublk_apply_params(ub);
1569 /* don't probe partitions if any ubq daemon is untrusted */
1570 if (ub->nr_privileged_daemon != ub->nr_queues_ready)
1571 set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
1573 get_device(&ub->cdev_dev);
1574 ret = add_disk(disk);
1577 * Has to drop the reference since ->free_disk won't be
1578 * called in case of add_disk failure.
1580 ublk_put_device(ub);
1583 set_bit(UB_STATE_USED, &ub->state);
1584 ub->dev_info.state = UBLK_S_DEV_LIVE;
1589 mutex_unlock(&ub->mutex);
1590 ublk_put_device(ub);
1594 static int ublk_ctrl_get_queue_affinity(struct io_uring_cmd *cmd)
1596 struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
1597 void __user *argp = (void __user *)(unsigned long)header->addr;
1598 struct ublk_device *ub;
1599 cpumask_var_t cpumask;
1600 unsigned long queue;
1601 unsigned int retlen;
1605 if (header->len * BITS_PER_BYTE < nr_cpu_ids)
1607 if (header->len & (sizeof(unsigned long)-1))
1612 ub = ublk_get_device_from_id(header->dev_id);
1616 queue = header->data[0];
1617 if (queue >= ub->dev_info.nr_hw_queues)
1618 goto out_put_device;
1621 if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
1622 goto out_put_device;
1624 for_each_possible_cpu(i) {
1625 if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[i] == queue)
1626 cpumask_set_cpu(i, cpumask);
1630 retlen = min_t(unsigned short, header->len, cpumask_size());
1631 if (copy_to_user(argp, cpumask, retlen))
1632 goto out_free_cpumask;
1633 if (retlen != header->len &&
1634 clear_user(argp + retlen, header->len - retlen))
1635 goto out_free_cpumask;
1639 free_cpumask_var(cpumask);
1641 ublk_put_device(ub);
1645 static inline void ublk_dump_dev_info(struct ublksrv_ctrl_dev_info *info)
1647 pr_devel("%s: dev id %d flags %llx\n", __func__,
1648 info->dev_id, info->flags);
1649 pr_devel("\t nr_hw_queues %d queue_depth %d\n",
1650 info->nr_hw_queues, info->queue_depth);
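/*
 * Typical control flow driven by ublksrv through /dev/ublk-control, as
 * suggested by the handlers below: ADD_DEV allocates the device and char
 * dev, SET_PARAMS configures it, the daemon opens /dev/ublkcN, mmaps the
 * descriptor buffers and issues FETCH_REQ for every tag, and START_DEV
 * then creates the ublkbN disk; STOP_DEV/DEL_DEV tear everything down.
 */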
1653 static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
1655 struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
1656 void __user *argp = (void __user *)(unsigned long)header->addr;
1657 struct ublksrv_ctrl_dev_info info;
1658 struct ublk_device *ub;
1661 if (header->len < sizeof(info) || !header->addr)
1663 if (header->queue_id != (u16)-1) {
1664 pr_warn("%s: queue_id is wrong %x\n",
1665 __func__, header->queue_id);
1668 if (copy_from_user(&info, argp, sizeof(info)))
1670 ublk_dump_dev_info(&info);
1671 if (header->dev_id != info.dev_id) {
1672 pr_warn("%s: dev id not match %u %u\n",
1673 __func__, header->dev_id, info.dev_id);
1677 ret = mutex_lock_killable(&ublk_ctl_mutex);
1682 ub = kzalloc(sizeof(*ub), GFP_KERNEL);
1685 mutex_init(&ub->mutex);
1686 spin_lock_init(&ub->mm_lock);
1687 INIT_WORK(&ub->quiesce_work, ublk_quiesce_work_fn);
1688 INIT_WORK(&ub->stop_work, ublk_stop_work_fn);
1689 INIT_DELAYED_WORK(&ub->monitor_work, ublk_daemon_monitor_work);
1691 ret = ublk_alloc_dev_number(ub, header->dev_id);
1695 memcpy(&ub->dev_info, &info, sizeof(info));
1697 /* update device id */
1698 ub->dev_info.dev_id = ub->ub_number;
1701 * 64bit flags will be copied back to userspace as the feature
1702 * negotiation result, so we have to clear flags the driver
1703 * doesn't support yet, so that userspace gets the correct flags
1704 * (features) to handle.
1706 ub->dev_info.flags &= UBLK_F_ALL;
1708 if (!IS_BUILTIN(CONFIG_BLK_DEV_UBLK))
1709 ub->dev_info.flags |= UBLK_F_URING_CMD_COMP_IN_TASK;
1711 /* We are not ready to support zero copy */
1712 ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;
1714 ub->dev_info.nr_hw_queues = min_t(unsigned int,
1715 ub->dev_info.nr_hw_queues, nr_cpu_ids);
1716 ublk_align_max_io_size(ub);
1718 ret = ublk_init_queues(ub);
1720 goto out_free_dev_number;
1722 ret = ublk_add_tag_set(ub);
1724 goto out_deinit_queues;
1727 if (copy_to_user(argp, &ub->dev_info, sizeof(info)))
1728 goto out_free_tag_set;
1731 * Add the char dev so that the ublksrv daemon can be set up.
1732 * ublk_add_chdev() will clean up everything if it fails.
1734 ret = ublk_add_chdev(ub);
1738 blk_mq_free_tag_set(&ub->tag_set);
1740 ublk_deinit_queues(ub);
1741 out_free_dev_number:
1742 ublk_free_dev_number(ub);
1744 mutex_destroy(&ub->mutex);
1747 mutex_unlock(&ublk_ctl_mutex);
1751 static inline bool ublk_idr_freed(int id)
1755 spin_lock(&ublk_idr_lock);
1756 ptr = idr_find(&ublk_index_idr, id);
1757 spin_unlock(&ublk_idr_lock);
1762 static int ublk_ctrl_del_dev(int idx)
1764 struct ublk_device *ub;
1767 ret = mutex_lock_killable(&ublk_ctl_mutex);
1771 ub = ublk_get_device_from_id(idx);
1774 ublk_put_device(ub);
1781 * Wait until the idr entry is removed, so that it can be reused after
1782 * the DEL_DEV command returns.
1785 wait_event(ublk_idr_wq, ublk_idr_freed(idx));
1786 mutex_unlock(&ublk_ctl_mutex);
1791 static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
1793 struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
1795 pr_devel("%s: cmd_op %x, dev id %d qid %d data %llx buf %llx len %u\n",
1796 __func__, cmd->cmd_op, header->dev_id, header->queue_id,
1797 header->data[0], header->addr, header->len);
1800 static int ublk_ctrl_stop_dev(struct io_uring_cmd *cmd)
1802 struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
1803 struct ublk_device *ub;
1805 ub = ublk_get_device_from_id(header->dev_id);
1810 cancel_work_sync(&ub->stop_work);
1811 cancel_work_sync(&ub->quiesce_work);
1813 ublk_put_device(ub);
1817 static int ublk_ctrl_get_dev_info(struct io_uring_cmd *cmd)
1819 struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
1820 void __user *argp = (void __user *)(unsigned long)header->addr;
1821 struct ublk_device *ub;
1824 if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr)
1827 ub = ublk_get_device_from_id(header->dev_id);
1831 if (copy_to_user(argp, &ub->dev_info, sizeof(ub->dev_info)))
1833 ublk_put_device(ub);
1838 static int ublk_ctrl_get_params(struct io_uring_cmd *cmd)
1840 struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
1841 void __user *argp = (void __user *)(unsigned long)header->addr;
1842 struct ublk_params_header ph;
1843 struct ublk_device *ub;
1846 if (header->len <= sizeof(ph) || !header->addr)
1849 if (copy_from_user(&ph, argp, sizeof(ph)))
1852 if (ph.len > header->len || !ph.len)
1855 if (ph.len > sizeof(struct ublk_params))
1856 ph.len = sizeof(struct ublk_params);
1858 ub = ublk_get_device_from_id(header->dev_id);
1862 mutex_lock(&ub->mutex);
1863 if (copy_to_user(argp, &ub->params, ph.len))
1867 mutex_unlock(&ub->mutex);
1869 ublk_put_device(ub);
1873 static int ublk_ctrl_set_params(struct io_uring_cmd *cmd)
1875 struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
1876 void __user *argp = (void __user *)(unsigned long)header->addr;
1877 struct ublk_params_header ph;
1878 struct ublk_device *ub;
1881 if (header->len <= sizeof(ph) || !header->addr)
1884 if (copy_from_user(&ph, argp, sizeof(ph)))
1887 if (ph.len > header->len || !ph.len || !ph.types)
1890 if (ph.len > sizeof(struct ublk_params))
1891 ph.len = sizeof(struct ublk_params);
1893 ub = ublk_get_device_from_id(header->dev_id);
1897 /* parameters can only be changed when device isn't live */
1898 mutex_lock(&ub->mutex);
1899 if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
1901 } else if (copy_from_user(&ub->params, argp, ph.len)) {
1904 /* clear all we don't support yet */
1905 ub->params.types &= UBLK_PARAM_TYPE_ALL;
1906 ret = ublk_validate_params(ub);
1908 ub->params.types = 0;
1910 mutex_unlock(&ub->mutex);
1911 ublk_put_device(ub);
1916 static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
1920 WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));
1921 /* All old ioucmds have to be completed */
1922 WARN_ON_ONCE(ubq->nr_io_ready);
1923 /* old daemon is PF_EXITING, put it now */
1924 put_task_struct(ubq->ubq_daemon);
1925 /* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
1926 ubq->ubq_daemon = NULL;
1928 for (i = 0; i < ubq->q_depth; i++) {
1929 struct ublk_io *io = &ubq->ios[i];
1931 /* forget everything now and be ready for new FETCH_REQ */
1938 static int ublk_ctrl_start_recovery(struct io_uring_cmd *cmd)
1940 struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
1941 struct ublk_device *ub;
1945 ub = ublk_get_device_from_id(header->dev_id);
1949 mutex_lock(&ub->mutex);
1950 if (!ublk_can_use_recovery(ub))
1953 * START_RECOVERY is only allowed after:
1955 * (1) UB_STATE_OPEN is not set, which means the dying process has exited
1956 * and the related io_uring ctx is freed, so the file struct of /dev/ublkcX is released.
1959 * (2) UBLK_S_DEV_QUIESCED is set, which means the quiesce_work:
1960 * (a) has quiesced the request queue
1961 * (b) has requeued every inflight rq whose io_flags is ACTIVE
1962 * (c) has requeued/aborted every inflight rq whose io_flags is NOT ACTIVE
1963 * (d) has completed/canceled all ioucmds owned by the dying process
1965 if (test_bit(UB_STATE_OPEN, &ub->state) ||
1966 ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
1970 pr_devel("%s: start recovery for dev id %d.\n", __func__, header->dev_id);
1971 for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
1972 ublk_queue_reinit(ub, ublk_get_queue(ub, i));
1973 /* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
1975 ub->nr_queues_ready = 0;
1976 ub->nr_privileged_daemon = 0;
1977 init_completion(&ub->completion);
1980 mutex_unlock(&ub->mutex);
1981 ublk_put_device(ub);
1985 static int ublk_ctrl_end_recovery(struct io_uring_cmd *cmd)
1987 struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
1988 int ublksrv_pid = (int)header->data[0];
1989 struct ublk_device *ub;
1992 ub = ublk_get_device_from_id(header->dev_id);
1996 pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
1997 __func__, ub->dev_info.nr_hw_queues, header->dev_id);
1998 /* wait until the new ubq_daemon has sent all FETCH_REQ commands */
1999 wait_for_completion_interruptible(&ub->completion);
2000 pr_devel("%s: All new ubq_daemons(nr: %d) are ready, dev id %d\n",
2001 __func__, ub->dev_info.nr_hw_queues, header->dev_id);
2003 mutex_lock(&ub->mutex);
2004 if (!ublk_can_use_recovery(ub))
2007 if (ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
2011 ub->dev_info.ublksrv_pid = ublksrv_pid;
2012 pr_devel("%s: new ublksrv_pid %d, dev id %d\n",
2013 __func__, ublksrv_pid, header->dev_id);
2014 blk_mq_unquiesce_queue(ub->ub_disk->queue);
2015 pr_devel("%s: queue unquiesced, dev id %d.\n",
2016 __func__, header->dev_id);
2017 blk_mq_kick_requeue_list(ub->ub_disk->queue);
2018 ub->dev_info.state = UBLK_S_DEV_LIVE;
2019 schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
2022 mutex_unlock(&ub->mutex);
2023 ublk_put_device(ub);
2027 static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
2028 unsigned int issue_flags)
2030 struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
2033 if (issue_flags & IO_URING_F_NONBLOCK)
2036 ublk_ctrl_cmd_dump(cmd);
2038 if (!(issue_flags & IO_URING_F_SQE128))
2042 if (!capable(CAP_SYS_ADMIN))
2046 switch (cmd->cmd_op) {
2047 case UBLK_CMD_START_DEV:
2048 ret = ublk_ctrl_start_dev(cmd);
2050 case UBLK_CMD_STOP_DEV:
2051 ret = ublk_ctrl_stop_dev(cmd);
2053 case UBLK_CMD_GET_DEV_INFO:
2054 ret = ublk_ctrl_get_dev_info(cmd);
2056 case UBLK_CMD_ADD_DEV:
2057 ret = ublk_ctrl_add_dev(cmd);
2059 case UBLK_CMD_DEL_DEV:
2060 ret = ublk_ctrl_del_dev(header->dev_id);
2062 case UBLK_CMD_GET_QUEUE_AFFINITY:
2063 ret = ublk_ctrl_get_queue_affinity(cmd);
2065 case UBLK_CMD_GET_PARAMS:
2066 ret = ublk_ctrl_get_params(cmd);
2068 case UBLK_CMD_SET_PARAMS:
2069 ret = ublk_ctrl_set_params(cmd);
2071 case UBLK_CMD_START_USER_RECOVERY:
2072 ret = ublk_ctrl_start_recovery(cmd);
2074 case UBLK_CMD_END_USER_RECOVERY:
2075 ret = ublk_ctrl_end_recovery(cmd);
2081 io_uring_cmd_done(cmd, ret, 0, issue_flags);
2082 pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
2083 __func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
2084 return -EIOCBQUEUED;
2087 static const struct file_operations ublk_ctl_fops = {
2088 .open = nonseekable_open,
2089 .uring_cmd = ublk_ctrl_uring_cmd,
2090 .owner = THIS_MODULE,
2091 .llseek = noop_llseek,
2094 static struct miscdevice ublk_misc = {
2095 .minor = MISC_DYNAMIC_MINOR,
2096 .name = "ublk-control",
2097 .fops = &ublk_ctl_fops,
2100 static int __init ublk_init(void)
2104 init_waitqueue_head(&ublk_idr_wq);
2106 ret = misc_register(&ublk_misc);
2110 ret = alloc_chrdev_region(&ublk_chr_devt, 0, UBLK_MINORS, "ublk-char");
2112 goto unregister_mis;
2114 ublk_chr_class = class_create(THIS_MODULE, "ublk-char");
2115 if (IS_ERR(ublk_chr_class)) {
2116 ret = PTR_ERR(ublk_chr_class);
2117 goto free_chrdev_region;
2122 unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
2124 misc_deregister(&ublk_misc);
2128 static void __exit ublk_exit(void)
2130 struct ublk_device *ub;
2133 idr_for_each_entry(&ublk_index_idr, ub, id)
2136 class_destroy(ublk_chr_class);
2137 misc_deregister(&ublk_misc);
2139 idr_destroy(&ublk_index_idr);
2140 unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
2143 module_init(ublk_init);
2144 module_exit(ublk_exit);
2146 MODULE_AUTHOR("Ming Lei <ming.lei@redhat.com>");
2147 MODULE_LICENSE("GPL");