/*
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include "dm-core.h"
#include "dm-rq.h"

#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>
#define DM_MSG_PREFIX "core-rq"
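/*
 * Default number of hardware queues and default queue depth for request-based
 * dm-mq devices.  Both may be overridden through the module parameters
 * declared at the bottom of this file and are clamped by
 * dm_get_blk_mq_nr_hw_queues() and dm_get_blk_mq_queue_depth() below.
 */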
#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048

static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;

/*
 * Number of IOs reserved in request-based DM's mempools, settable by the user.
 */
#define RESERVED_REQUEST_BASED_IOS	256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

static bool use_blk_mq = IS_ENABLED(CONFIG_DM_MQ_DEFAULT);
bool dm_use_blk_mq_default(void)
{
	return use_blk_mq;
}

bool dm_use_blk_mq(struct mapped_device *md)
{
	return md->use_blk_mq;
}
EXPORT_SYMBOL_GPL(dm_use_blk_mq);

unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_module_param(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);

static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
	return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}

static unsigned dm_get_blk_mq_queue_depth(void)
{
	return __dm_get_module_param(&dm_mq_queue_depth,
				     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}

int dm_request_based(struct mapped_device *md)
{
	return queue_is_rq_based(md->queue);
}
static void dm_old_start_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_queue_stopped(q))
		blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_mq_start_queue(struct request_queue *q)
{
	blk_mq_unquiesce_queue(q);
	blk_mq_kick_requeue_list(q);
}

void dm_start_queue(struct request_queue *q)
{
	if (!q->mq_ops)
		dm_old_start_queue(q);
	else
		dm_mq_start_queue(q);
}

static void dm_old_stop_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (!blk_queue_stopped(q))
		blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_mq_stop_queue(struct request_queue *q)
{
	blk_mq_quiesce_queue(q);
}

void dm_stop_queue(struct request_queue *q)
{
	if (!q->mq_ops)
		dm_old_stop_queue(q);
	else
		dm_mq_stop_queue(q);
}
/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
	blk_status_t error = clone->bi_status;
	bool is_last = !clone->bi_next;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io()
		 * handle the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't notify the upper layer of the error yet.
		 * The error handling decision is made by the target driver
		 * when the request is completed.
		 */
		tio->error = error;
		goto exit;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Notify the upper layer of the data completion.
	 */
	tio->completed += nr_bytes;

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	if (is_last)
 exit:
		blk_update_request(tio->orig, BLK_STS_OK, tio->completed);
}
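/*
 * Both the legacy .request_fn path and the blk-mq path size their requests so
 * that a struct dm_rq_target_io lives in the per-request payload (cmd_size),
 * which is why blk_mq_rq_to_pdu() below works for either case.
 */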
static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}

static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies - tio->duration_jiffies;
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, true,
				    tio->duration_jiffies, &tio->stats_aux);
	}
}
/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
	struct request_queue *q = md->queue;
	unsigned long flags;

	atomic_dec(&md->pending[rw]);

	/* nudge anyone waiting on suspend queue */
	if (!md_in_flight(md))
		wake_up(&md->wait);

	/*
	 * Run this off this callpath, as drivers could invoke end_io while
	 * inside their request_fn (and holding the queue lock). Calling
	 * back into ->request_fn() could deadlock attempting to grab the
	 * queue lock again.
	 */
	if (!q->mq_ops && run_queue) {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_run_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}

	/*
	 * dm_put() must be at the end of this function. See the comment above.
	 */
	dm_put(md);
}
/*
 * Complete the clone and the original request.
 * Must be called without the clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, blk_status_t error)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	blk_rq_unprep_clone(clone);
	tio->ti->type->release_clone_rq(clone, NULL);

	rq_end_stats(md, rq);
	if (!rq->q->mq_ops)
		blk_end_request_all(rq, error);
	else
		blk_mq_end_request(rq, error);
	rq_completed(md, rw, true);
}
/*
 * Requeue the original request of a clone.
 */
static void dm_old_requeue_request(struct request *rq, unsigned long delay_ms)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, rq);
	blk_delay_queue(q, delay_ms);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
	blk_mq_delay_kick_requeue_list(q, msecs);
}

void dm_mq_kick_requeue_list(struct mapped_device *md)
{
	__dm_mq_kick_requeue_list(dm_get_md_queue(md), 0);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);

static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
	blk_mq_requeue_request(rq, false);
	__dm_mq_kick_requeue_list(rq->q, msecs);
}
static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	int rw = rq_data_dir(rq);
	unsigned long delay_ms = delay_requeue ? 100 : 0;

	rq_end_stats(md, rq);
	if (tio->clone) {
		blk_rq_unprep_clone(tio->clone);
		tio->ti->type->release_clone_rq(tio->clone, NULL);
	}

	if (!rq->q->mq_ops)
		dm_old_requeue_request(rq, delay_ms);
	else
		dm_mq_delay_requeue_request(rq, delay_ms);

	rq_completed(md, rw, false);
}
static void dm_done(struct request *clone, blk_status_t error, bool mapped)
{
	int r = DM_ENDIO_DONE;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	if (unlikely(error == BLK_STS_TARGET)) {
		if (req_op(clone) == REQ_OP_DISCARD &&
		    !clone->q->limits.max_discard_sectors)
			disable_discard(tio->md);
		else if (req_op(clone) == REQ_OP_WRITE_SAME &&
			 !clone->q->limits.max_write_same_sectors)
			disable_write_same(tio->md);
		else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
			 !clone->q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(tio->md);
	}

	switch (r) {
	case DM_ENDIO_DONE:
		/* The target wants to complete the I/O */
		dm_end_request(clone, error);
		break;
	case DM_ENDIO_INCOMPLETE:
		/* The target will handle the I/O */
		return;
	case DM_ENDIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_original_request(tio, false);
		break;
	case DM_ENDIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	default:
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}
/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;
	int rw;

	if (!clone) {
		struct mapped_device *md = tio->md;

		rq_end_stats(md, rq);
		rw = rq_data_dir(rq);
		if (!rq->q->mq_ops)
			blk_end_request_all(rq, tio->error);
		else
			blk_mq_end_request(rq, tio->error);
		rq_completed(md, rw, false);
		return;
	}

	if (rq->rq_flags & RQF_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}
/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, blk_status_t error)
{
	struct dm_rq_target_io *tio = tio_from_request(rq);

	tio->error = error;
	if (!rq->q->mq_ops)
		blk_complete_request(rq);
	else
		blk_mq_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * The target's rq_end_io() function isn't called.
 * This may be used when the target's clone_and_map_rq() function fails.
 */
static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
{
	rq->rq_flags |= RQF_FAILED;
	dm_complete_request(rq, error);
}
/*
 * Called with the clone's queue lock held (in the case of .request_fn)
 */
static void end_clone_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	/*
	 * Actual request completion is done in a softirq context which doesn't
	 * hold the clone's queue lock.  Otherwise, deadlock could occur because:
	 *     - another request may be submitted by the upper level driver
	 *       of the stacking device during the completion
	 *     - a submission that requires the queue lock may be made
	 *       against this clone's queue
	 */
	dm_complete_request(tio->orig, error);
}
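/*
 * dm_dispatch_clone_request() below hands the fully prepared clone to the
 * underlying device with blk_insert_cloned_request(), the stacking-driver
 * helper that bypasses the normal bio-based submission path; any status other
 * than OK/RESOURCE/DEV_RESOURCE is reported back to the original request
 * right away via dm_complete_request().
 */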
static blk_status_t dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
	blk_status_t r;

	if (blk_queue_io_stat(clone->q))
		clone->rq_flags |= RQF_IO_STAT;

	clone->start_time_ns = ktime_get_ns();
	r = blk_insert_cloned_request(clone->q, clone);
	if (r != BLK_STS_OK && r != BLK_STS_RESOURCE && r != BLK_STS_DEV_RESOURCE)
		/* must complete clone in terms of original request */
		dm_complete_request(rq, r);
	return r;
}
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct dm_rq_clone_bio_info *info =
		container_of(bio, struct dm_rq_clone_bio_info, clone);

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;

	return 0;
}

static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->end_io = end_clone_request;
	clone->end_io_data = tio;
	tio->clone = clone;

	return 0;
}
static void map_tio_request(struct kthread_work *work);

static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
		     struct mapped_device *md)
{
	tio->md = md;
	tio->ti = NULL;
	tio->clone = NULL;
	tio->orig = rq;
	tio->error = 0;
	tio->completed = 0;
	/*
	 * Avoid initializing info for blk-mq; it passes
	 * target-specific data through info.ptr
	 * (see: dm_mq_init_request)
	 */
	if (!md->init_tio_pdu)
		memset(&tio->info, 0, sizeof(tio->info));
	if (md->kworker_task)
		kthread_init_work(&tio->work, map_tio_request);
}
/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio)
{
	int r;
	struct dm_target *ti = tio->ti;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request *clone = NULL;
	blk_status_t ret;

	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
check_again:
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
			/* -ENOMEM */
			ti->type->release_clone_rq(clone, &tio->info);
			return DM_MAPIO_REQUEUE;
		}

		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(rq));
		ret = dm_dispatch_clone_request(clone, rq);
		if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
			blk_rq_unprep_clone(clone);
			blk_mq_cleanup_rq(clone);
			tio->ti->type->release_clone_rq(clone, &tio->info);
			tio->clone = NULL;
			if (!rq->q->mq_ops)
				r = DM_MAPIO_DELAY_REQUEUE;
			else
				r = DM_MAPIO_REQUEUE;
			goto check_again;
		}
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		break;
	case DM_MAPIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	case DM_MAPIO_KILL:
		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(rq, BLK_STS_IOERR);
		break;
	default:
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}

	return r;
}
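/*
 * Note: the ->clone_and_map_rq hook used above is the request-based target
 * method (dm_clone_and_map_request_fn in include/linux/device-mapper.h); a
 * target such as dm-multipath returns one of the DM_MAPIO_* codes handled
 * here and, for DM_MAPIO_REMAPPED, hands back a clone that is later dropped
 * through ->release_clone_rq().
 */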
static void dm_start_request(struct mapped_device *md, struct request *orig)
{
	if (!orig->q->mq_ops)
		blk_start_request(orig);
	else
		blk_mq_start_request(orig);
	atomic_inc(&md->pending[rq_data_dir(orig)]);

	if (md->seq_rq_merge_deadline_usecs) {
		md->last_rq_pos = rq_end_sector(orig);
		md->last_rq_rw = rq_data_dir(orig);
		md->last_rq_start_time = ktime_get();
	}

	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies;
		tio->n_sectors = blk_rq_sectors(orig);
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, false, 0,
				    &tio->stats_aux);
	}

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count taken by the device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);
}
static int __dm_rq_init_rq(struct mapped_device *md, struct request *rq)
{
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

	/*
	 * Must initialize md member of tio, otherwise it won't
	 * be available in dm_mq_queue_rq.
	 */
	tio->md = md;

	if (md->init_tio_pdu) {
		/* target-specific per-io data is immediately after the tio */
		tio->info.ptr = tio + 1;
	}

	return 0;
}

static int dm_rq_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
{
	return __dm_rq_init_rq(q->rq_alloc_data, rq);
}
static void map_tio_request(struct kthread_work *work)
{
	struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);

	if (map_request(tio) == DM_MAPIO_REQUEUE)
		dm_requeue_original_request(tio, false);
}
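/*
 * sysfs interface for the sequential-I/O merge deadline used by
 * dm_old_request_fn()'s merge heuristic.  The value is in microseconds,
 * capped at MAX_SEQ_RQ_MERGE_DEADLINE_USECS; 0 disables the heuristic.
 * (The show/store handlers below are wired up by dm-sysfs.c; based on their
 * names the attribute is expected to appear as
 * /sys/block/dm-<minor>/dm/rq_based_seq_io_merge_deadline -- path given for
 * illustration only.)
 */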
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
	return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
}

#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000
ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count)
{
	unsigned deadline;

	if (dm_get_md_type(md) != DM_TYPE_REQUEST_BASED)
		return count;

	if (kstrtouint(buf, 10, &deadline))
		return -EINVAL;

	if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
		deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;

	md->seq_rq_merge_deadline_usecs = deadline;

	return count;
}
static bool dm_old_request_peeked_before_merge_deadline(struct mapped_device *md)
{
	ktime_t kt_deadline;

	if (!md->seq_rq_merge_deadline_usecs)
		return false;

	kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
	kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);

	return !ktime_after(ktime_get(), kt_deadline);
}
/*
 * q->request_fn for old request-based dm.
 * Called with the queue lock held.
 */
static void dm_old_request_fn(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_target *ti = md->immutable_target;
	struct request *rq;
	struct dm_rq_target_io *tio;
	sector_t pos = 0;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		if (unlikely(!map)) {
			dm_put_live_table(md, srcu_idx);
			return;
		}
		ti = dm_table_find_target(map, pos);
		dm_put_live_table(md, srcu_idx);
	}

	/*
	 * For suspend, check blk_queue_stopped() and increment
	 * ->pending within a single queue_lock not to increment the
	 * number of in-flight I/Os after the queue is stopped in
	 * dm_suspend().
	 */
	while (!blk_queue_stopped(q)) {
		rq = blk_peek_request(q);
		if (!rq)
			return;

		/* always use block 0 to find the target for flushes for now */
		pos = 0;
		if (req_op(rq) != REQ_OP_FLUSH)
			pos = blk_rq_pos(rq);

		if ((dm_old_request_peeked_before_merge_deadline(md) &&
		     md_in_flight(md) && rq->bio && !bio_multiple_segments(rq->bio) &&
		     md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
		    (ti->type->busy && ti->type->busy(ti))) {
			blk_delay_queue(q, 10);
			return;
		}

		dm_start_request(md, rq);

		tio = tio_from_request(rq);
		init_tio(tio, rq, md);
		/* Establish tio->ti before queuing work (map_tio_request) */
		tio->ti = ti;
		kthread_queue_work(&md->kworker, &tio->work);
		BUG_ON(!irqs_disabled());
	}
}
/*
 * Fully initialize a .request_fn request-based queue.
 */
int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct dm_target *immutable_tgt;

	/* Fully initialize the queue */
	md->queue->cmd_size = sizeof(struct dm_rq_target_io);
	md->queue->rq_alloc_data = md;
	md->queue->request_fn = dm_old_request_fn;
	md->queue->init_rq_fn = dm_rq_init_rq;

	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->queue->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}
	if (blk_init_allocated_queue(md->queue) < 0)
		return -EINVAL;

	/* disable dm_old_request_fn's merge heuristic by default */
	md->seq_rq_merge_deadline_usecs = 0;

	blk_queue_softirq_done(md->queue, dm_softirq_done);

	/* Initialize the request-based DM worker thread */
	kthread_init_worker(&md->kworker);
	md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
				       "kdmwork-%s", dm_device_name(md));
	if (IS_ERR(md->kworker_task)) {
		int error = PTR_ERR(md->kworker_task);
		md->kworker_task = NULL;
		return error;
	}

	return 0;
}
static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
			      unsigned int hctx_idx, unsigned int numa_node)
{
	return __dm_rq_init_rq(set->driver_data, rq);
}
static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
	struct mapped_device *md = tio->md;
	struct dm_target *ti = md->immutable_target;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		ti = dm_table_find_target(map, 0);
		dm_put_live_table(md, srcu_idx);
	}

	if (ti->type->busy && ti->type->busy(ti))
		return BLK_STS_RESOURCE;

	dm_start_request(md, rq);

	/* Init tio using md established in .init_request */
	init_tio(tio, rq, md);

	/* Establish tio->ti before calling map_request(). */
	tio->ti = ti;

	/* Direct call is fine since .queue_rq allows allocations */
	if (map_request(tio) == DM_MAPIO_REQUEUE) {
		/* Undo dm_start_request() before requeuing */
		rq_end_stats(md, rq);
		rq_completed(md, rq_data_dir(rq), false);
		return BLK_STS_RESOURCE;
	}

	return BLK_STS_OK;
}
static const struct blk_mq_ops dm_mq_ops = {
	.queue_rq = dm_mq_queue_rq,
	.complete = dm_softirq_done,
	.init_request = dm_mq_init_request,
};
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q;
	struct dm_target *immutable_tgt;
	int err;

	if (!dm_table_all_blk_mq_devices(t)) {
		DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
		return -EINVAL;
	}

	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
	if (!md->tag_set)
		return -ENOMEM;

	md->tag_set->ops = &dm_mq_ops;
	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
	md->tag_set->numa_node = md->numa_node_id;
	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
	md->tag_set->driver_data = md;

	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}

	err = blk_mq_alloc_tag_set(md->tag_set);
	if (err)
		goto out_kfree_tag_set;

	q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}

	return 0;

out_tag_set:
	blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
	kfree(md->tag_set);
	return err;
}
void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
	if (md->tag_set) {
		blk_mq_free_tag_set(md->tag_set);
		kfree(md->tag_set);
	}
}
module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");
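/*
 * Illustrative usage (not taken from this file): the parameters above can be
 * set at load time, e.g. "modprobe dm_mod dm_mq_queue_depth=4096
 * dm_mq_nr_hw_queues=4", or adjusted later through
 * /sys/module/<module>/parameters/; the exact module name depends on how
 * device-mapper is built (dm_mod when built as a module).
 */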