/*
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "core-rq"

/*
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, *clone;
	struct kthread_work work;
	blk_status_t error;
	union map_info info;
	struct dm_stats_aux stats_aux;
	unsigned long duration_jiffies;
	unsigned int n_sectors;
	unsigned int completed;
};

#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048
static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_REQUEST_BASED_IOS	256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_module_param(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);

static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
	return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}

static unsigned dm_get_blk_mq_queue_depth(void)
{
	return __dm_get_module_param(&dm_mq_queue_depth,
				     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}

int dm_request_based(struct mapped_device *md)
{
	return queue_is_mq(md->queue);
}

void dm_start_queue(struct request_queue *q)
{
	blk_mq_unquiesce_queue(q);
	blk_mq_kick_requeue_list(q);
}

void dm_stop_queue(struct request_queue *q)
{
	blk_mq_quiesce_queue(q);
}

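/*
 * Note: dm_stop_queue()/dm_start_queue() bracket request-based suspend and
 * resume in dm.c. Quiescing the queue guarantees ->queue_rq() is not entered
 * while the device is suspended; the requeue-list kick on resume re-dispatches
 * any requests that were parked while I/O was blocked.
 */
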
/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
	blk_status_t error = clone->bi_status;
	bool is_last = !clone->bi_next;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once error occurred, just let clone->end_io() handle
		 * the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't notice the error to the upper layer yet.
		 * The error handling decision is made by the target driver,
		 * when the request is completed.
		 */
		tio->error = error;
		goto exit;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Notice the data completion to the upper layer.
	 */
	tio->completed += nr_bytes;

	/*
	 * Update the original request.
	 * Do not use blk_mq_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	if (is_last)
 exit:
		blk_update_request(tio->orig, BLK_STS_OK, tio->completed);
}

static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}

static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies - tio->duration_jiffies;
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, true,
				    tio->duration_jiffies, &tio->stats_aux);
	}
}

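/*
 * Note: rq_end_stats() pairs with the accounting started in dm_start_request()
 * below; tio->duration_jiffies holds the start timestamp until completion,
 * when it is converted into an elapsed time for dm_stats_account_io().
 */
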
/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md)
{
	/*
	 * dm_put() must be at the end of this function. See the comment above
	 */
	dm_put(md);
}

/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	blk_rq_unprep_clone(clone);
	tio->ti->type->release_clone_rq(clone, NULL);

	rq_end_stats(md, rq);
	blk_mq_end_request(rq, error);
	rq_completed(md);
}

static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
	blk_mq_delay_kick_requeue_list(q, msecs);
}

void dm_mq_kick_requeue_list(struct mapped_device *md)
{
	__dm_mq_kick_requeue_list(md->queue, 0);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);

static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
	blk_mq_requeue_request(rq, false);
	__dm_mq_kick_requeue_list(rq->q, msecs);
}

static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	unsigned long delay_ms = delay_requeue ? 100 : 0;

	rq_end_stats(md, rq);
	if (tio->clone) {
		blk_rq_unprep_clone(tio->clone);
		tio->ti->type->release_clone_rq(tio->clone, NULL);
	}

	dm_mq_delay_requeue_request(rq, delay_ms);
	rq_completed(md);
}

static void dm_done(struct request *clone, blk_status_t error, bool mapped)
{
	int r = DM_ENDIO_DONE;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	if (unlikely(error == BLK_STS_TARGET)) {
		if (req_op(clone) == REQ_OP_DISCARD &&
		    !clone->q->limits.max_discard_sectors)
			disable_discard(tio->md);
		else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
			 !clone->q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(tio->md);
	}

	switch (r) {
	case DM_ENDIO_DONE:
		/* The target wants to complete the I/O */
		dm_end_request(clone, error);
		break;
	case DM_ENDIO_INCOMPLETE:
		/* The target will handle the I/O */
		return;
	case DM_ENDIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_original_request(tio, false);
		break;
	case DM_ENDIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	default:
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;

	if (!clone) {
		struct mapped_device *md = tio->md;

		rq_end_stats(md, rq);
		blk_mq_end_request(rq, tio->error);
		rq_completed(md);
		return;
	}

	if (rq->rq_flags & RQF_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, blk_status_t error)
{
	struct dm_rq_target_io *tio = tio_from_request(rq);

	tio->error = error;
	if (likely(!blk_should_fake_timeout(rq->q)))
		blk_mq_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's clone_and_map_rq() function fails.
 */
static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
{
	rq->rq_flags |= RQF_FAILED;
	dm_complete_request(rq, error);
}

static void end_clone_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	dm_complete_request(tio->orig, error);
}

static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct dm_rq_clone_bio_info *info =
		container_of(bio, struct dm_rq_clone_bio_info, clone);

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;

	return 0;
}

static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, &tio->md->mempools->bs, gfp_mask,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	tio->clone = clone;

	return 0;
}

static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
		     struct mapped_device *md)
{
	tio->md = md;
	tio->ti = NULL;
	tio->clone = NULL;
	tio->orig = rq;
	tio->error = 0;
	tio->completed = 0;
	/*
	 * Avoid initializing info for blk-mq; it passes
	 * target-specific data through info.ptr
	 * (see: dm_mq_init_request)
	 */
	if (!md->init_tio_pdu)
		memset(&tio->info, 0, sizeof(tio->info));
}

/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio)
{
	int r;
	struct dm_target *ti = tio->ti;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request *clone = NULL;
	blk_status_t ret;

	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
			/* -ENOMEM */
			ti->type->release_clone_rq(clone, &tio->info);
			return DM_MAPIO_REQUEUE;
		}

		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(rq));
		ret = blk_insert_cloned_request(clone);
		switch (ret) {
		case BLK_STS_OK:
			break;
		case BLK_STS_RESOURCE:
		case BLK_STS_DEV_RESOURCE:
			blk_rq_unprep_clone(clone);
			blk_mq_cleanup_rq(clone);
			tio->ti->type->release_clone_rq(clone, &tio->info);
			tio->clone = NULL;
			return DM_MAPIO_REQUEUE;
		default:
			/* must complete clone in terms of original request */
			dm_complete_request(rq, ret);
		}
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		break;
	case DM_MAPIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	case DM_MAPIO_KILL:
		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(rq, BLK_STS_IOERR);
		break;
	default:
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}

	return r;
}

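/*
 * Illustrative sketch (not part of this file): a request-based target's
 * ->clone_and_map_rq() typically allocates a request on the underlying
 * device's queue and hands it back for dispatch by map_request() above.
 * Names below are hypothetical; see drivers/md/dm-mpath.c for a real
 * implementation.
 *
 *	static int example_clone_and_map_rq(struct dm_target *ti,
 *					    struct request *rq,
 *					    union map_info *map_context,
 *					    struct request **__clone)
 *	{
 *		struct request_queue *q = bdev_get_queue(example_pick_bdev(ti));
 *		struct request *clone;
 *
 *		clone = blk_mq_alloc_request(q, rq->cmd_flags | REQ_NOMERGE,
 *					     BLK_MQ_REQ_NOWAIT);
 *		if (IS_ERR(clone))
 *			return DM_MAPIO_DELAY_REQUEUE;
 *
 *		clone->bio = clone->biotail = NULL;
 *		*__clone = clone;
 *		return DM_MAPIO_REMAPPED;
 *	}
 */
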
/* DEPRECATED: previously used for request-based merge heuristic in dm_request_fn() */
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
	return sprintf(buf, "%u\n", 0);
}

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count)
{
	return count;
}

static void dm_start_request(struct mapped_device *md, struct request *orig)
{
	blk_mq_start_request(orig);

	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies;
		tio->n_sectors = blk_rq_sectors(orig);
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, false, 0,
				    &tio->stats_aux);
	}

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count by device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);
}

static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
			      unsigned int hctx_idx, unsigned int numa_node)
{
	struct mapped_device *md = set->driver_data;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

	/*
	 * Must initialize md member of tio, otherwise it won't
	 * be available in dm_mq_queue_rq.
	 */
	tio->md = md;

	if (md->init_tio_pdu) {
		/* target-specific per-io data is immediately after the tio */
		tio->info.ptr = tio + 1;
	}

	return 0;
}

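/*
 * Layout note: struct dm_rq_target_io lives in the blk-mq per-request PDU
 * (sized via tag_set->cmd_size in dm_mq_init_request_queue() below), so an
 * immutable target's per_io_data_size bytes follow the tio directly; that is
 * what tio->info.ptr = tio + 1 points at.
 */
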
static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
			  const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
	struct mapped_device *md = tio->md;
	struct dm_target *ti = md->immutable_target;

	/*
	 * blk-mq's unquiesce may come from outside events, such as
	 * elevator switch, updating nr_requests or others, and request may
	 * come during suspend, so simply ask for blk-mq to requeue it.
	 */
	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)))
		return BLK_STS_RESOURCE;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map;

		map = dm_get_live_table(md, &srcu_idx);
		if (unlikely(!map)) {
			dm_put_live_table(md, srcu_idx);
			return BLK_STS_RESOURCE;
		}
		ti = dm_table_find_target(map, 0);
		dm_put_live_table(md, srcu_idx);
	}

	if (ti->type->busy && ti->type->busy(ti))
		return BLK_STS_RESOURCE;

	dm_start_request(md, rq);

	/* Init tio using md established in .init_request */
	init_tio(tio, rq, md);

	/*
	 * Establish tio->ti before calling map_request().
	 */
	tio->ti = ti;

	/* Direct call is fine since .queue_rq allows allocations */
	if (map_request(tio) == DM_MAPIO_REQUEUE) {
		/* Undo dm_start_request() before requeuing */
		rq_end_stats(md, rq);
		rq_completed(md);
		return BLK_STS_RESOURCE;
	}

	return BLK_STS_OK;
}

static const struct blk_mq_ops dm_mq_ops = {
	.queue_rq = dm_mq_queue_rq,
	.complete = dm_softirq_done,
	.init_request = dm_mq_init_request,
};

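/*
 * Completions flow through blk_mq_complete_request() (see dm_complete_request()
 * above), which invokes .complete = dm_softirq_done; the target's rq_end_io
 * handling and any requeue decisions happen there rather than in ->queue_rq().
 */
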
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct dm_target *immutable_tgt;
	int err;

	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
	if (!md->tag_set)
		return -ENOMEM;

	md->tag_set->ops = &dm_mq_ops;
	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
	md->tag_set->numa_node = md->numa_node_id;
	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
	md->tag_set->driver_data = md;

	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}

	err = blk_mq_alloc_tag_set(md->tag_set);
	if (err)
		goto out_kfree_tag_set;

	err = blk_mq_init_allocated_queue(md->tag_set, md->queue);
	if (err)
		goto out_tag_set;
	return 0;

out_tag_set:
	blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
	kfree(md->tag_set);
	md->tag_set = NULL;

	return err;
}

void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
	if (md->tag_set) {
		blk_mq_free_tag_set(md->tag_set);
		kfree(md->tag_set);
	}
}

module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

/* Unused, but preserved for userspace compatibility */
static bool use_blk_mq = true;
module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");
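
/*
 * Example (illustrative): these are dm_mod module parameters, so the defaults
 * above can be overridden at load time, e.g.
 *
 *	modprobe dm_mod dm_mq_nr_hw_queues=4 dm_mq_queue_depth=1024
 *
 * or with dm_mod.dm_mq_queue_depth=1024 on the kernel command line when dm-mod
 * is built in. Values are bounded by the __dm_get_module_param() accessors
 * near the top of this file.
 */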