/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
10 #include "dm-uevent.h"
13 #include <linux/init.h>
14 #include <linux/module.h>
15 #include <linux/mutex.h>
16 #include <linux/sched/mm.h>
17 #include <linux/sched/signal.h>
18 #include <linux/blkpg.h>
19 #include <linux/bio.h>
20 #include <linux/mempool.h>
21 #include <linux/dax.h>
22 #include <linux/slab.h>
23 #include <linux/idr.h>
24 #include <linux/uio.h>
25 #include <linux/hdreg.h>
26 #include <linux/delay.h>
27 #include <linux/wait.h>
29 #include <linux/refcount.h>
30 #include <linux/part_stat.h>
31 #include <linux/blk-crypto.h>
32 #include <linux/blk-crypto-profile.h>
34 #define DM_MSG_PREFIX "core"
37 * Cookies are numeric values sent with CHANGE and REMOVE
38 * uevents while resuming, removing or renaming the device.
40 #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
41 #define DM_COOKIE_LENGTH 24
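/*
 * Illustrative sketch (not part of upstream dm.c): roughly how a cookie value
 * could be formatted into a udev environment string when a uevent is emitted.
 * "DM_COOKIE=" plus a 32-bit decimal value and the terminating NUL fit within
 * DM_COOKIE_LENGTH bytes. The helper name below is made up for this sketch.
 */
static inline void dm_cookie_env_sketch(char buf[DM_COOKIE_LENGTH], u32 cookie)
{
	snprintf(buf, DM_COOKIE_LENGTH, "%s=%u", DM_COOKIE_ENV_VAR_NAME, cookie);
}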
 * For a REQ_POLLED fs bio, this flag is set if we link the mapped underlying
 * dm_ios into one list and reuse bio->bi_private as the list head. Before
 * ending this fs bio, we will restore its ->bi_private.
48 #define REQ_DM_POLL_LIST REQ_DRV
50 static const char *_name = DM_NAME;
52 static unsigned int major = 0;
53 static unsigned int _major = 0;
55 static DEFINE_IDR(_minor_idr);
57 static DEFINE_SPINLOCK(_minor_lock);
59 static void do_deferred_remove(struct work_struct *w);
61 static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
63 static struct workqueue_struct *deferred_remove_workqueue;
65 atomic_t dm_global_event_nr = ATOMIC_INIT(0);
66 DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);
68 void dm_issue_global_event(void)
70 atomic_inc(&dm_global_event_nr);
71 wake_up(&dm_global_eventq);
74 DEFINE_STATIC_KEY_FALSE(stats_enabled);
75 DEFINE_STATIC_KEY_FALSE(swap_bios_enabled);
76 DEFINE_STATIC_KEY_FALSE(zoned_enabled);
79 * One of these is allocated (on-stack) per original bio.
86 unsigned sector_count;
87 bool is_abnormal_io:1;
88 bool submit_as_polled:1;
91 #define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
92 #define DM_IO_BIO_OFFSET \
93 (offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio))
95 static inline struct dm_target_io *clone_to_tio(struct bio *clone)
97 return container_of(clone, struct dm_target_io, clone);
100 void *dm_per_bio_data(struct bio *bio, size_t data_size)
102 if (!dm_tio_flagged(clone_to_tio(bio), DM_TIO_INSIDE_DM_IO))
103 return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size;
104 return (char *)bio - DM_IO_BIO_OFFSET - data_size;
106 EXPORT_SYMBOL_GPL(dm_per_bio_data);
108 struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
110 struct dm_io *io = (struct dm_io *)((char *)data + data_size);
111 if (io->magic == DM_IO_MAGIC)
112 return (struct bio *)((char *)io + DM_IO_BIO_OFFSET);
113 BUG_ON(io->magic != DM_TIO_MAGIC);
114 return (struct bio *)((char *)io + DM_TARGET_IO_BIO_OFFSET);
116 EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);
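/*
 * Illustrative sketch (hypothetical target code, not part of dm.c): a target
 * that sets ti->per_io_data_size = sizeof(struct example_per_bio) in its
 * constructor can convert between its clone bio and its private per-bio area
 * with the two helpers above. "struct example_per_bio" and
 * example_map_sketch() are assumptions made up for this sketch.
 */
struct example_per_bio {
	sector_t original_sector;
};

static int example_map_sketch(struct dm_target *ti, struct bio *bio)
{
	struct example_per_bio *pb = dm_per_bio_data(bio, sizeof(*pb));

	pb->original_sector = bio->bi_iter.bi_sector;
	/* dm_bio_from_per_bio_data(pb, sizeof(*pb)) recovers "bio" later */
	/* remap bio->bi_bdev / bi_sector here, then: */
	return DM_MAPIO_REMAPPED;
}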
118 unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
120 return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
122 EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
124 #define MINOR_ALLOCED ((void *)-1)
126 #define DM_NUMA_NODE NUMA_NO_NODE
127 static int dm_numa_node = DM_NUMA_NODE;
129 #define DEFAULT_SWAP_BIOS (8 * 1048576 / PAGE_SIZE)
130 static int swap_bios = DEFAULT_SWAP_BIOS;
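/*
 * Worked value: with 4 KiB pages, DEFAULT_SWAP_BIOS is 8 * 1048576 / 4096 =
 * 2048, i.e. at most 2048 in-flight swap bios per device by default; larger
 * page sizes shrink the limit proportionally.
 */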
static int get_swap_bios(void)
{
	int latch = READ_ONCE(swap_bios);

	if (unlikely(latch <= 0))
		latch = DEFAULT_SWAP_BIOS;
	return latch;
}
139 struct table_device {
140 struct list_head list;
142 struct dm_dev dm_dev;
146 * Bio-based DM's mempools' reserved IOs set by the user.
148 #define RESERVED_BIO_BASED_IOS 16
149 static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = READ_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;
	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}
	return param;
}
unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
	unsigned param = READ_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}
	return param;
}
191 unsigned dm_get_reserved_bio_based_ios(void)
193 return __dm_get_module_param(&reserved_bio_based_ios,
194 RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
196 EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
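/*
 * Illustrative note (not part of dm.c): the clamping helper above means an
 * unset or out-of-range module parameter is silently replaced by a sane
 * value, and the clamped value is written back with cmpxchg so later readers
 * observe the effective setting. For example, loading with
 * reserved_bio_based_ios=0 still yields RESERVED_BIO_BASED_IOS (16), and any
 * value above DM_RESERVED_MAX_IOS is capped to that maximum.
 */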
198 static unsigned dm_get_numa_node(void)
200 return __dm_get_module_param_int(&dm_numa_node,
201 DM_NUMA_NODE, num_online_nodes() - 1);
204 static int __init local_init(void)
208 r = dm_uevent_init();
212 deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
213 if (!deferred_remove_workqueue) {
215 goto out_uevent_exit;
219 r = register_blkdev(_major, _name);
221 goto out_free_workqueue;
229 destroy_workqueue(deferred_remove_workqueue);
236 static void local_exit(void)
238 flush_scheduled_work();
239 destroy_workqueue(deferred_remove_workqueue);
241 unregister_blkdev(_major, _name);
246 DMINFO("cleaned up");
249 static int (*_inits[])(void) __initdata = {
260 static void (*_exits[])(void) = {
271 static int __init dm_init(void)
273 const int count = ARRAY_SIZE(_inits);
276 #if (IS_ENABLED(CONFIG_IMA) && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE))
277 DMWARN("CONFIG_IMA_DISABLE_HTABLE is disabled."
278 " Duplicate IMA measurements will not be recorded in the IMA log.");
281 for (i = 0; i < count; i++) {
295 static void __exit dm_exit(void)
297 int i = ARRAY_SIZE(_exits);
303 * Should be empty by this point.
305 idr_destroy(&_minor_idr);
309 * Block device functions
311 int dm_deleting_md(struct mapped_device *md)
313 return test_bit(DMF_DELETING, &md->flags);
316 static int dm_blk_open(struct block_device *bdev, fmode_t mode)
318 struct mapped_device *md;
320 spin_lock(&_minor_lock);
322 md = bdev->bd_disk->private_data;
326 if (test_bit(DMF_FREEING, &md->flags) ||
327 dm_deleting_md(md)) {
333 atomic_inc(&md->open_count);
335 spin_unlock(&_minor_lock);
337 return md ? 0 : -ENXIO;
340 static void dm_blk_close(struct gendisk *disk, fmode_t mode)
342 struct mapped_device *md;
344 spin_lock(&_minor_lock);
346 md = disk->private_data;
350 if (atomic_dec_and_test(&md->open_count) &&
351 (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
352 queue_work(deferred_remove_workqueue, &deferred_remove_work);
356 spin_unlock(&_minor_lock);
359 int dm_open_count(struct mapped_device *md)
361 return atomic_read(&md->open_count);
365 * Guarantees nothing is using the device before it's deleted.
367 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
371 spin_lock(&_minor_lock);
373 if (dm_open_count(md)) {
376 set_bit(DMF_DEFERRED_REMOVE, &md->flags);
377 } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
380 set_bit(DMF_DELETING, &md->flags);
382 spin_unlock(&_minor_lock);
387 int dm_cancel_deferred_remove(struct mapped_device *md)
391 spin_lock(&_minor_lock);
393 if (test_bit(DMF_DELETING, &md->flags))
396 clear_bit(DMF_DEFERRED_REMOVE, &md->flags);
398 spin_unlock(&_minor_lock);
403 static void do_deferred_remove(struct work_struct *w)
405 dm_deferred_remove();
408 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
410 struct mapped_device *md = bdev->bd_disk->private_data;
412 return dm_get_geometry(md, geo);
415 static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
416 struct block_device **bdev)
418 struct dm_target *tgt;
419 struct dm_table *map;
424 map = dm_get_live_table(md, srcu_idx);
425 if (!map || !dm_table_get_size(map))
428 /* We only support devices that have a single target */
429 if (dm_table_get_num_targets(map) != 1)
432 tgt = dm_table_get_target(map, 0);
433 if (!tgt->type->prepare_ioctl)
436 if (dm_suspended_md(md))
439 r = tgt->type->prepare_ioctl(tgt, bdev);
440 if (r == -ENOTCONN && !fatal_signal_pending(current)) {
441 dm_put_live_table(md, *srcu_idx);
449 static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
451 dm_put_live_table(md, srcu_idx);
454 static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
455 unsigned int cmd, unsigned long arg)
457 struct mapped_device *md = bdev->bd_disk->private_data;
460 r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
466 * Target determined this ioctl is being issued against a
467 * subset of the parent bdev; require extra privileges.
469 if (!capable(CAP_SYS_RAWIO)) {
471 "%s: sending ioctl %x to DM device without required privilege.",
478 if (!bdev->bd_disk->fops->ioctl)
481 r = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
483 dm_unprepare_ioctl(md, srcu_idx);
487 u64 dm_start_time_ns_from_clone(struct bio *bio)
489 return jiffies_to_nsecs(clone_to_tio(bio)->io->start_time);
491 EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
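/*
 * Illustrative sketch (hypothetical target code, not part of dm.c): a
 * target's end_io hook can derive per-bio latency from the DM core start
 * time, much as the multipath path selectors do. example_end_io_sketch() is
 * an assumption made up for this sketch.
 */
static int example_end_io_sketch(struct dm_target *ti, struct bio *bio,
				 blk_status_t *error)
{
	u64 latency_ns = ktime_get_ns() - dm_start_time_ns_from_clone(bio);

	/* feed latency_ns into target-private statistics here */
	(void)latency_ns;
	return DM_ENDIO_DONE;
}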
493 static bool bio_is_flush_with_data(struct bio *bio)
495 return ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size);
498 static void dm_io_acct(struct dm_io *io, bool end)
500 struct dm_stats_aux *stats_aux = &io->stats_aux;
501 unsigned long start_time = io->start_time;
502 struct mapped_device *md = io->md;
503 struct bio *bio = io->orig_bio;
504 unsigned int sectors;
	 * If REQ_PREFLUSH is set, don't account the payload; it will be
	 * submitted (and accounted) after this flush completes.
510 if (bio_is_flush_with_data(bio))
512 else if (likely(!(dm_io_flagged(io, DM_IO_WAS_SPLIT))))
513 sectors = bio_sectors(bio);
515 sectors = io->sectors;
518 bdev_start_io_acct(bio->bi_bdev, sectors, bio_op(bio),
521 bdev_end_io_acct(bio->bi_bdev, bio_op(bio), start_time);
523 if (static_branch_unlikely(&stats_enabled) &&
524 unlikely(dm_stats_used(&md->stats))) {
527 if (likely(!dm_io_flagged(io, DM_IO_WAS_SPLIT)))
528 sector = bio->bi_iter.bi_sector;
530 sector = bio_end_sector(bio) - io->sector_offset;
532 dm_stats_account_io(&md->stats, bio_data_dir(bio),
534 end, start_time, stats_aux);
538 static void __dm_start_io_acct(struct dm_io *io)
540 dm_io_acct(io, false);
543 static void dm_start_io_acct(struct dm_io *io, struct bio *clone)
546 * Ensure IO accounting is only ever started once.
548 if (dm_io_flagged(io, DM_IO_ACCOUNTED))
551 /* Expect no possibility for race unless DM_TIO_IS_DUPLICATE_BIO. */
552 if (!clone || likely(dm_tio_is_normal(clone_to_tio(clone)))) {
553 dm_io_set_flag(io, DM_IO_ACCOUNTED);
556 /* Can afford locking given DM_TIO_IS_DUPLICATE_BIO */
557 spin_lock_irqsave(&io->lock, flags);
558 if (dm_io_flagged(io, DM_IO_ACCOUNTED)) {
559 spin_unlock_irqrestore(&io->lock, flags);
562 dm_io_set_flag(io, DM_IO_ACCOUNTED);
563 spin_unlock_irqrestore(&io->lock, flags);
566 __dm_start_io_acct(io);
569 static void dm_end_io_acct(struct dm_io *io)
571 dm_io_acct(io, true);
574 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
577 struct dm_target_io *tio;
580 clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs);
581 tio = clone_to_tio(clone);
583 dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO);
586 io = container_of(tio, struct dm_io, tio);
587 io->magic = DM_IO_MAGIC;
588 io->status = BLK_STS_OK;
590 /* one ref is for submission, the other is for completion */
591 atomic_set(&io->io_count, 2);
592 this_cpu_inc(*md->pending_io);
594 io->split_bio = NULL;
596 spin_lock_init(&io->lock);
597 io->start_time = jiffies;
600 if (static_branch_unlikely(&stats_enabled))
601 dm_stats_record_start(&md->stats, &io->stats_aux);
606 static void free_io(struct dm_io *io)
608 bio_put(&io->tio.clone);
611 static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
612 unsigned target_bio_nr, unsigned *len, gfp_t gfp_mask)
614 struct mapped_device *md = ci->io->md;
615 struct dm_target_io *tio;
618 if (!ci->io->tio.io) {
619 /* the dm_target_io embedded in ci->io is available */
621 /* alloc_io() already initialized embedded clone */
624 clone = bio_alloc_clone(NULL, ci->bio, gfp_mask,
629 /* REQ_DM_POLL_LIST shouldn't be inherited */
630 clone->bi_opf &= ~REQ_DM_POLL_LIST;
632 tio = clone_to_tio(clone);
633 tio->flags = 0; /* also clears DM_TIO_INSIDE_DM_IO */
636 tio->magic = DM_TIO_MAGIC;
639 tio->target_bio_nr = target_bio_nr;
643 /* Set default bdev, but target must bio_set_dev() before issuing IO */
644 clone->bi_bdev = md->disk->part0;
645 if (unlikely(ti->needs_bio_set_dev))
646 bio_set_dev(clone, md->disk->part0);
649 clone->bi_iter.bi_size = to_bytes(*len);
650 if (bio_integrity(clone))
651 bio_integrity_trim(clone);
657 static void free_tio(struct bio *clone)
659 if (dm_tio_flagged(clone_to_tio(clone), DM_TIO_INSIDE_DM_IO))
665 * Add the bio to the list of deferred io.
667 static void queue_io(struct mapped_device *md, struct bio *bio)
671 spin_lock_irqsave(&md->deferred_lock, flags);
672 bio_list_add(&md->deferred, bio);
673 spin_unlock_irqrestore(&md->deferred_lock, flags);
674 queue_work(md->wq, &md->work);
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
682 struct dm_table *dm_get_live_table(struct mapped_device *md,
683 int *srcu_idx) __acquires(md->io_barrier)
685 *srcu_idx = srcu_read_lock(&md->io_barrier);
687 return srcu_dereference(md->map, &md->io_barrier);
690 void dm_put_live_table(struct mapped_device *md,
691 int srcu_idx) __releases(md->io_barrier)
693 srcu_read_unlock(&md->io_barrier, srcu_idx);
696 void dm_sync_table(struct mapped_device *md)
698 synchronize_srcu(&md->io_barrier);
699 synchronize_rcu_expedited();
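/*
 * Illustrative sketch (not part of dm.c): the canonical calling pattern for
 * the helpers above. The table pointer is only stable inside the SRCU
 * read-side section, so it must not be cached past dm_put_live_table().
 * example_live_table_walk() is a made-up name.
 */
static void example_live_table_walk(struct mapped_device *md)
{
	int srcu_idx;
	struct dm_table *map = dm_get_live_table(md, &srcu_idx);

	if (map) {
		/* inspect or walk the live table here */
	}
	dm_put_live_table(md, srcu_idx);
}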
703 * A fast alternative to dm_get_live_table/dm_put_live_table.
704 * The caller must not block between these two functions.
706 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
709 return rcu_dereference(md->map);
712 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
717 static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md,
718 int *srcu_idx, unsigned bio_opf)
720 if (bio_opf & REQ_NOWAIT)
721 return dm_get_live_table_fast(md);
723 return dm_get_live_table(md, srcu_idx);
726 static inline void dm_put_live_table_bio(struct mapped_device *md, int srcu_idx,
729 if (bio_opf & REQ_NOWAIT)
730 dm_put_live_table_fast(md);
732 dm_put_live_table(md, srcu_idx);
735 static char *_dm_claim_ptr = "I belong to device-mapper";
738 * Open a table device so we can use it as a map destination.
740 static int open_table_device(struct table_device *td, dev_t dev,
741 struct mapped_device *md)
743 struct block_device *bdev;
747 BUG_ON(td->dm_dev.bdev);
749 bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
751 return PTR_ERR(bdev);
753 r = bd_link_disk_holder(bdev, dm_disk(md));
755 blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
759 td->dm_dev.bdev = bdev;
760 td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off);
765 * Close a table device that we've been using.
767 static void close_table_device(struct table_device *td, struct mapped_device *md)
769 if (!td->dm_dev.bdev)
772 bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
773 blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
774 put_dax(td->dm_dev.dax_dev);
775 td->dm_dev.bdev = NULL;
776 td->dm_dev.dax_dev = NULL;
779 static struct table_device *find_table_device(struct list_head *l, dev_t dev,
782 struct table_device *td;
784 list_for_each_entry(td, l, list)
785 if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
791 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
792 struct dm_dev **result)
795 struct table_device *td;
797 mutex_lock(&md->table_devices_lock);
798 td = find_table_device(&md->table_devices, dev, mode);
800 td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
802 mutex_unlock(&md->table_devices_lock);
806 td->dm_dev.mode = mode;
807 td->dm_dev.bdev = NULL;
809 if ((r = open_table_device(td, dev, md))) {
810 mutex_unlock(&md->table_devices_lock);
815 format_dev_t(td->dm_dev.name, dev);
817 refcount_set(&td->count, 1);
818 list_add(&td->list, &md->table_devices);
820 refcount_inc(&td->count);
822 mutex_unlock(&md->table_devices_lock);
824 *result = &td->dm_dev;
828 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
830 struct table_device *td = container_of(d, struct table_device, dm_dev);
832 mutex_lock(&md->table_devices_lock);
833 if (refcount_dec_and_test(&td->count)) {
834 close_table_device(td, md);
838 mutex_unlock(&md->table_devices_lock);
841 static void free_table_devices(struct list_head *devices)
843 struct list_head *tmp, *next;
845 list_for_each_safe(tmp, next, devices) {
846 struct table_device *td = list_entry(tmp, struct table_device, list);
848 DMWARN("dm_destroy: %s still exists with %d references",
849 td->dm_dev.name, refcount_read(&td->count));
855 * Get the geometry associated with a dm device
857 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
865 * Set the geometry of a device.
867 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
869 sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
871 if (geo->start > sz) {
872 DMWARN("Start sector is beyond the geometry limits.");
881 static int __noflush_suspending(struct mapped_device *md)
883 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
886 static void dm_io_complete(struct dm_io *io)
888 blk_status_t io_error;
889 struct mapped_device *md = io->md;
890 struct bio *bio = io->split_bio ? io->split_bio : io->orig_bio;
892 if (io->status == BLK_STS_DM_REQUEUE) {
895 * Target requested pushing back the I/O.
897 spin_lock_irqsave(&md->deferred_lock, flags);
898 if (__noflush_suspending(md) &&
899 !WARN_ON_ONCE(dm_is_zone_write(md, bio))) {
900 /* NOTE early return due to BLK_STS_DM_REQUEUE below */
901 bio_list_add_head(&md->deferred, bio);
904 * noflush suspend was interrupted or this is
905 * a write to a zoned target.
907 io->status = BLK_STS_IOERR;
909 spin_unlock_irqrestore(&md->deferred_lock, flags);
912 io_error = io->status;
913 if (dm_io_flagged(io, DM_IO_ACCOUNTED))
915 else if (!io_error) {
		 * Must handle a target that returned DM_MAPIO_SUBMITTED only
		 * to then bio_endio() rather than dm_submit_bio_remap(), so
		 * IO accounting is started (and ended) for it here.
920 __dm_start_io_acct(io);
925 this_cpu_dec(*md->pending_io);
927 /* nudge anyone waiting on suspend queue */
928 if (unlikely(wq_has_sleeper(&md->wait)))
931 if (io_error == BLK_STS_DM_REQUEUE || io_error == BLK_STS_AGAIN) {
932 if (bio->bi_opf & REQ_POLLED) {
934 * Upper layer won't help us poll split bio (io->orig_bio
935 * may only reflect a subset of the pre-split original)
936 * so clear REQ_POLLED in case of requeue.
938 bio_clear_polled(bio);
939 if (io_error == BLK_STS_AGAIN) {
940 /* io_uring doesn't handle BLK_STS_AGAIN (yet) */
945 if (io_error == BLK_STS_DM_REQUEUE)
949 if (bio_is_flush_with_data(bio)) {
951 * Preflush done for flush with data, reissue
952 * without REQ_PREFLUSH.
954 bio->bi_opf &= ~REQ_PREFLUSH;
957 /* done with normal IO or empty flush */
959 bio->bi_status = io_error;
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
968 static inline void __dm_io_dec_pending(struct dm_io *io)
970 if (atomic_dec_and_test(&io->io_count))
974 static void dm_io_set_error(struct dm_io *io, blk_status_t error)
978 /* Push-back supersedes any I/O errors */
979 spin_lock_irqsave(&io->lock, flags);
980 if (!(io->status == BLK_STS_DM_REQUEUE &&
981 __noflush_suspending(io->md))) {
984 spin_unlock_irqrestore(&io->lock, flags);
987 static void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
990 dm_io_set_error(io, error);
992 __dm_io_dec_pending(io);
995 void disable_discard(struct mapped_device *md)
997 struct queue_limits *limits = dm_get_queue_limits(md);
999 /* device doesn't really support DISCARD, disable it */
1000 limits->max_discard_sectors = 0;
1003 void disable_write_zeroes(struct mapped_device *md)
1005 struct queue_limits *limits = dm_get_queue_limits(md);
1007 /* device doesn't really support WRITE ZEROES, disable it */
1008 limits->max_write_zeroes_sectors = 0;
1011 static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
1013 return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
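/*
 * Illustrative note (assumption about target usage, not part of dm.c): a
 * target opts in to the swap-bio throttling checked above by setting
 * ti->limit_swap_bios = true in its constructor (dm-crypt does this, for
 * example); __map_bio() then takes md->swap_bios_semaphore around each
 * REQ_SWAP bio it maps and clone_endio() releases it.
 */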
1016 static void clone_endio(struct bio *bio)
1018 blk_status_t error = bio->bi_status;
1019 struct dm_target_io *tio = clone_to_tio(bio);
1020 struct dm_target *ti = tio->ti;
1021 dm_endio_fn endio = ti->type->end_io;
1022 struct dm_io *io = tio->io;
1023 struct mapped_device *md = io->md;
1025 if (unlikely(error == BLK_STS_TARGET)) {
1026 if (bio_op(bio) == REQ_OP_DISCARD &&
1027 !bdev_max_discard_sectors(bio->bi_bdev))
1028 disable_discard(md);
1029 else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
1030 !bdev_write_zeroes_sectors(bio->bi_bdev))
1031 disable_write_zeroes(md);
1034 if (static_branch_unlikely(&zoned_enabled) &&
1035 unlikely(blk_queue_is_zoned(bdev_get_queue(bio->bi_bdev))))
1036 dm_zone_endio(io, bio);
1039 int r = endio(ti, bio, &error);
1041 case DM_ENDIO_REQUEUE:
1042 if (static_branch_unlikely(&zoned_enabled)) {
			 * Requeuing writes to a sequential zone of a zoned
			 * target will break the sequential write pattern:
			 * fail such IO.
1048 if (WARN_ON_ONCE(dm_is_zone_write(md, bio)))
1049 error = BLK_STS_IOERR;
1051 error = BLK_STS_DM_REQUEUE;
1053 error = BLK_STS_DM_REQUEUE;
1057 case DM_ENDIO_INCOMPLETE:
1058 /* The target will handle the io */
1061 DMWARN("unimplemented target endio return value: %d", r);
1066 if (static_branch_unlikely(&swap_bios_enabled) &&
1067 unlikely(swap_bios_limit(ti, bio)))
1068 up(&md->swap_bios_semaphore);
1071 dm_io_dec_pending(io, error);
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
1078 static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
1079 sector_t target_offset)
1081 return ti->len - target_offset;
1084 static sector_t max_io_len(struct dm_target *ti, sector_t sector)
1086 sector_t target_offset = dm_target_offset(ti, sector);
1087 sector_t len = max_io_len_target_boundary(ti, target_offset);
1091 * Does the target need to split IO even further?
1092 * - varied (per target) IO splitting is a tenet of DM; this
1093 * explains why stacked chunk_sectors based splitting via
1094 * blk_max_size_offset() isn't possible here. So pass in
1095 * ti->max_io_len to override stacked chunk_sectors.
1097 if (ti->max_io_len) {
1098 max_len = blk_max_size_offset(ti->table->md->queue,
1099 target_offset, ti->max_io_len);
1107 int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
1109 if (len > UINT_MAX) {
1110 DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
1111 (unsigned long long)len, UINT_MAX);
1112 ti->error = "Maximum size of target IO is too large";
1116 ti->max_io_len = (uint32_t) len;
1120 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
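/*
 * Illustrative sketch (hypothetical constructor fragment, not part of dm.c):
 * a chunked target caps the IO size it is handed so every clone stays within
 * one chunk. example_set_chunk_len() and "chunk_sectors" are assumptions.
 */
static int example_set_chunk_len(struct dm_target *ti, sector_t chunk_sectors)
{
	int r = dm_set_target_max_io_len(ti, chunk_sectors);

	if (r)
		return r;	/* ti->error was set above */
	return 0;
}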
1122 static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
1123 sector_t sector, int *srcu_idx)
1124 __acquires(md->io_barrier)
1126 struct dm_table *map;
1127 struct dm_target *ti;
1129 map = dm_get_live_table(md, srcu_idx);
1133 ti = dm_table_find_target(map, sector);
1140 static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
1141 long nr_pages, enum dax_access_mode mode, void **kaddr,
1144 struct mapped_device *md = dax_get_private(dax_dev);
1145 sector_t sector = pgoff * PAGE_SECTORS;
1146 struct dm_target *ti;
1147 long len, ret = -EIO;
1150 ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1154 if (!ti->type->direct_access)
1156 len = max_io_len(ti, sector) / PAGE_SECTORS;
1159 nr_pages = min(len, nr_pages);
1160 ret = ti->type->direct_access(ti, pgoff, nr_pages, mode, kaddr, pfn);
1163 dm_put_live_table(md, srcu_idx);
1168 static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
1171 struct mapped_device *md = dax_get_private(dax_dev);
1172 sector_t sector = pgoff * PAGE_SECTORS;
1173 struct dm_target *ti;
1177 ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1181 if (WARN_ON(!ti->type->dax_zero_page_range)) {
		 * ->zero_page_range() is a mandatory dax operation. If we are
		 * here, something is wrong.
1188 ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
1190 dm_put_live_table(md, srcu_idx);
1195 static size_t dm_dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
1196 void *addr, size_t bytes, struct iov_iter *i)
1198 struct mapped_device *md = dax_get_private(dax_dev);
1199 sector_t sector = pgoff * PAGE_SECTORS;
1200 struct dm_target *ti;
1204 ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1205 if (!ti || !ti->type->dax_recovery_write)
1208 ret = ti->type->dax_recovery_write(ti, pgoff, addr, bytes, i);
1210 dm_put_live_table(md, srcu_idx);
1215 * A target may call dm_accept_partial_bio only from the map routine. It is
1216 * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
1217 * operations, REQ_OP_ZONE_APPEND (zone append writes) and any bio serviced by
1218 * __send_duplicate_bios().
 * dm_accept_partial_bio informs DM that the target wants to process only
 * n_sectors additional sectors of the bio; the rest of the data should be
 * sent in the next bio.
 * A diagram that explains the arithmetic:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <----- bio_sectors ----->
 *                      <-- n_sectors -->
1233 * Region 1 was already iterated over with bio_advance or similar function.
1234 * (it may be empty if the target doesn't use bio_advance)
1235 * Region 2 is the remaining bio size that the target wants to process.
 * (it may be empty if region 1 is non-empty, although there is no reason
 *  to do this)
 * The target requires that region 3 be sent in the next bio.
1240 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
1241 * the partially processed part (the sum of regions 1+2) must be the same for all
1242 * copies of the bio.
1244 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
1246 struct dm_target_io *tio = clone_to_tio(bio);
1247 unsigned bio_sectors = bio_sectors(bio);
1249 BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
1250 BUG_ON(op_is_zone_mgmt(bio_op(bio)));
1251 BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);
1252 BUG_ON(bio_sectors > *tio->len_ptr);
1253 BUG_ON(n_sectors > bio_sectors);
1255 *tio->len_ptr -= bio_sectors - n_sectors;
1256 bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
1259 * __split_and_process_bio() may have already saved mapped part
1260 * for accounting but it is being reduced so update accordingly.
1262 dm_io_set_flag(tio->io, DM_IO_WAS_SPLIT);
1263 tio->io->sectors = n_sectors;
1265 EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
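/*
 * Illustrative sketch (hypothetical map function, not part of dm.c): a target
 * that can only handle IO up to an internal boundary trims the clone and lets
 * DM core resubmit the remainder. The 1 MiB (2048-sector) boundary and
 * example_boundary_map() are assumptions made up for this sketch.
 */
static int example_boundary_map(struct dm_target *ti, struct bio *bio)
{
	unsigned int remaining = 2048 - (bio->bi_iter.bi_sector & 2047);

	if (bio_sectors(bio) > remaining)
		dm_accept_partial_bio(bio, remaining);
	/* remap and submit the (possibly trimmed) clone as usual */
	return DM_MAPIO_REMAPPED;
}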
1268 * @clone: clone bio that DM core passed to target's .map function
1269 * @tgt_clone: clone of @clone bio that target needs submitted
1271 * Targets should use this interface to submit bios they take
1272 * ownership of when returning DM_MAPIO_SUBMITTED.
 * Targets should also set ti->accounts_remapped_io.
1276 void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone)
1278 struct dm_target_io *tio = clone_to_tio(clone);
1279 struct dm_io *io = tio->io;
1281 /* establish bio that will get submitted */
	 * Account io->orig_bio to DM dev on behalf of target
	 * that took ownership of IO with DM_MAPIO_SUBMITTED.
1289 dm_start_io_acct(io, clone);
1291 trace_block_bio_remap(tgt_clone, disk_devt(io->md->disk),
1293 submit_bio_noacct(tgt_clone);
1295 EXPORT_SYMBOL_GPL(dm_submit_bio_remap);
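/*
 * Illustrative sketch (hypothetical target code, not part of dm.c): a target
 * that defers submission returns DM_MAPIO_SUBMITTED from ->map and later
 * hands the clone back to DM core, which handles accounting and the remap
 * trace before calling submit_bio_noacct(). example_deferred_submit() is a
 * made-up name; passing NULL as tgt_clone submits the clone itself, as
 * __map_bio() below also does.
 */
static void example_deferred_submit(struct bio *clone)
{
	dm_submit_bio_remap(clone, NULL);
}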
1297 static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
1299 mutex_lock(&md->swap_bios_lock);
1300 while (latch < md->swap_bios) {
1302 down(&md->swap_bios_semaphore);
1305 while (latch > md->swap_bios) {
1307 up(&md->swap_bios_semaphore);
1310 mutex_unlock(&md->swap_bios_lock);
1313 static void __map_bio(struct bio *clone)
1315 struct dm_target_io *tio = clone_to_tio(clone);
1316 struct dm_target *ti = tio->ti;
1317 struct dm_io *io = tio->io;
1318 struct mapped_device *md = io->md;
1321 clone->bi_end_io = clone_endio;
1326 tio->old_sector = clone->bi_iter.bi_sector;
1328 if (static_branch_unlikely(&swap_bios_enabled) &&
1329 unlikely(swap_bios_limit(ti, clone))) {
1330 int latch = get_swap_bios();
1331 if (unlikely(latch != md->swap_bios))
1332 __set_swap_bios_limit(md, latch);
1333 down(&md->swap_bios_semaphore);
1336 if (static_branch_unlikely(&zoned_enabled)) {
1338 * Check if the IO needs a special mapping due to zone append
1339 * emulation on zoned target. In this case, dm_zone_map_bio()
1340 * calls the target map operation.
1342 if (unlikely(dm_emulate_zone_append(md)))
1343 r = dm_zone_map_bio(tio);
1345 r = ti->type->map(ti, clone);
1347 r = ti->type->map(ti, clone);
1350 case DM_MAPIO_SUBMITTED:
1351 /* target has assumed ownership of this io */
1352 if (!ti->accounts_remapped_io)
1353 dm_start_io_acct(io, clone);
1355 case DM_MAPIO_REMAPPED:
1356 dm_submit_bio_remap(clone, NULL);
1359 case DM_MAPIO_REQUEUE:
1360 if (static_branch_unlikely(&swap_bios_enabled) &&
1361 unlikely(swap_bios_limit(ti, clone)))
1362 up(&md->swap_bios_semaphore);
1364 if (r == DM_MAPIO_KILL)
1365 dm_io_dec_pending(io, BLK_STS_IOERR);
1367 dm_io_dec_pending(io, BLK_STS_DM_REQUEUE);
1370 DMWARN("unimplemented target map return value: %d", r);
1375 static void setup_split_accounting(struct clone_info *ci, unsigned len)
1377 struct dm_io *io = ci->io;
1379 if (ci->sector_count > len) {
1381 * Split needed, save the mapped part for accounting.
1382 * NOTE: dm_accept_partial_bio() will update accordingly.
1384 dm_io_set_flag(io, DM_IO_WAS_SPLIT);
1388 if (static_branch_unlikely(&stats_enabled) &&
1389 unlikely(dm_stats_used(&io->md->stats))) {
1391 * Save bi_sector in terms of its offset from end of
1392 * original bio, only needed for DM-stats' benefit.
1393 * - saved regardless of whether split needed so that
1394 * dm_accept_partial_bio() doesn't need to.
1396 io->sector_offset = bio_end_sector(ci->bio) - ci->sector;
1400 static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
1401 struct dm_target *ti, unsigned num_bios)
1406 for (try = 0; try < 2; try++) {
1410 mutex_lock(&ci->io->md->table_devices_lock);
1411 for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
1412 bio = alloc_tio(ci, ti, bio_nr, NULL,
1413 try ? GFP_NOIO : GFP_NOWAIT);
1417 bio_list_add(blist, bio);
1420 mutex_unlock(&ci->io->md->table_devices_lock);
1421 if (bio_nr == num_bios)
1424 while ((bio = bio_list_pop(blist)))
1429 static int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
1430 unsigned num_bios, unsigned *len)
1432 struct bio_list blist = BIO_EMPTY_LIST;
1441 setup_split_accounting(ci, *len);
1442 clone = alloc_tio(ci, ti, 0, len, GFP_NOIO);
1447 /* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
1448 alloc_multiple_bios(&blist, ci, ti, num_bios);
1449 while ((clone = bio_list_pop(&blist))) {
1450 dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
1460 static void __send_empty_flush(struct clone_info *ci)
1462 unsigned target_nr = 0;
1463 struct dm_target *ti;
1464 struct bio flush_bio;
	 * Use an on-stack bio for this; it's safe since we don't
	 * need to reference it after submit. It's just used as
	 * the basis for the clone(s).
1471 bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0,
1472 REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC);
1474 ci->bio = &flush_bio;
1475 ci->sector_count = 0;
1476 ci->io->tio.clone.bi_iter.bi_size = 0;
1478 while ((ti = dm_table_get_target(ci->map, target_nr++))) {
1481 atomic_add(ti->num_flush_bios, &ci->io->io_count);
1482 bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
1483 atomic_sub(ti->num_flush_bios - bios, &ci->io->io_count);
1487 * alloc_io() takes one extra reference for submission, so the
1488 * reference won't reach 0 without the following subtraction
1490 atomic_sub(1, &ci->io->io_count);
1492 bio_uninit(ci->bio);
1495 static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
1501 len = min_t(sector_t, ci->sector_count,
1502 max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));
1504 atomic_add(num_bios, &ci->io->io_count);
1505 bios = __send_duplicate_bios(ci, ti, num_bios, &len);
1507 * alloc_io() takes one extra reference for submission, so the
1508 * reference won't reach 0 without the following (+1) subtraction
1510 atomic_sub(num_bios - bios + 1, &ci->io->io_count);
1513 ci->sector_count -= len;
1516 static bool is_abnormal_io(struct bio *bio)
1518 unsigned int op = bio_op(bio);
1520 if (op != REQ_OP_READ && op != REQ_OP_WRITE && op != REQ_OP_FLUSH) {
1522 case REQ_OP_DISCARD:
1523 case REQ_OP_SECURE_ERASE:
1524 case REQ_OP_WRITE_ZEROES:
1534 static blk_status_t __process_abnormal_io(struct clone_info *ci,
1535 struct dm_target *ti)
1537 unsigned num_bios = 0;
1539 switch (bio_op(ci->bio)) {
1540 case REQ_OP_DISCARD:
1541 num_bios = ti->num_discard_bios;
1543 case REQ_OP_SECURE_ERASE:
1544 num_bios = ti->num_secure_erase_bios;
1546 case REQ_OP_WRITE_ZEROES:
1547 num_bios = ti->num_write_zeroes_bios;
1552 * Even though the device advertised support for this type of
1553 * request, that does not mean every target supports it, and
1554 * reconfiguration might also have changed that since the
1555 * check was performed.
1557 if (unlikely(!num_bios))
1558 return BLK_STS_NOTSUPP;
1560 __send_changing_extent_only(ci, ti, num_bios);
 * Reuse ->bi_private as the dm_io list head for storing all dm_io instances
 * associated with this bio, and the bio's original bi_private needs to be
 * stored in dm_io->data before the reuse.
 *
 * bio->bi_private is owned by the fs or upper layer, so the block layer won't
 * touch it after splitting. Meanwhile it won't be changed by anyone after the
 * bio is submitted, so this reuse is safe.
1573 static inline struct dm_io **dm_poll_list_head(struct bio *bio)
1575 return (struct dm_io **)&bio->bi_private;
1578 static void dm_queue_poll_io(struct bio *bio, struct dm_io *io)
1580 struct dm_io **head = dm_poll_list_head(bio);
1582 if (!(bio->bi_opf & REQ_DM_POLL_LIST)) {
1583 bio->bi_opf |= REQ_DM_POLL_LIST;
1585 * Save .bi_private into dm_io, so that we can reuse
1586 * .bi_private as dm_io list head for storing dm_io list
1588 io->data = bio->bi_private;
1590 /* tell block layer to poll for completion */
1591 bio->bi_cookie = ~BLK_QC_T_NONE;
1596 * bio recursed due to split, reuse original poll list,
1597 * and save bio->bi_private too.
1599 io->data = (*head)->data;
1607 * Select the correct strategy for processing a non-flush bio.
1609 static blk_status_t __split_and_process_bio(struct clone_info *ci)
1612 struct dm_target *ti;
1615 ti = dm_table_find_target(ci->map, ci->sector);
1617 return BLK_STS_IOERR;
1619 if (unlikely((ci->bio->bi_opf & REQ_NOWAIT) != 0) &&
1620 unlikely(!dm_target_supports_nowait(ti->type)))
1621 return BLK_STS_NOTSUPP;
1623 if (unlikely(ci->is_abnormal_io))
1624 return __process_abnormal_io(ci, ti);
1627 * Only support bio polling for normal IO, and the target io is
1628 * exactly inside the dm_io instance (verified in dm_poll_dm_io)
1630 ci->submit_as_polled = ci->bio->bi_opf & REQ_POLLED;
1632 len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
1633 setup_split_accounting(ci, len);
1634 clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO);
1638 ci->sector_count -= len;
1643 static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
1644 struct dm_table *map, struct bio *bio, bool is_abnormal)
1647 ci->io = alloc_io(md, bio);
1649 ci->is_abnormal_io = is_abnormal;
1650 ci->submit_as_polled = false;
1651 ci->sector = bio->bi_iter.bi_sector;
1652 ci->sector_count = bio_sectors(bio);
1654 /* Shouldn't happen but sector_count was being set to 0 so... */
1655 if (static_branch_unlikely(&zoned_enabled) &&
1656 WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count))
1657 ci->sector_count = 0;
1661 * Entry point to split a bio into clones and submit them to the targets.
1663 static void dm_split_and_process_bio(struct mapped_device *md,
1664 struct dm_table *map, struct bio *bio)
1666 struct clone_info ci;
1668 blk_status_t error = BLK_STS_OK;
1671 is_abnormal = is_abnormal_io(bio);
1672 if (unlikely(is_abnormal)) {
1674 * Use blk_queue_split() for abnormal IO (e.g. discard, etc)
1675 * otherwise associated queue_limits won't be imposed.
1677 blk_queue_split(&bio);
1680 init_clone_info(&ci, md, map, bio, is_abnormal);
1683 if (bio->bi_opf & REQ_PREFLUSH) {
1684 __send_empty_flush(&ci);
1685 /* dm_io_complete submits any data associated with flush */
1689 error = __split_and_process_bio(&ci);
1690 if (error || !ci.sector_count)
1693 * Remainder must be passed to submit_bio_noacct() so it gets handled
1694 * *after* bios already submitted have been completely processed.
1696 WARN_ON_ONCE(!dm_io_flagged(io, DM_IO_WAS_SPLIT));
1697 io->split_bio = bio_split(bio, io->sectors, GFP_NOIO,
1698 &md->queue->bio_split);
1699 bio_chain(io->split_bio, bio);
1700 trace_block_split(io->split_bio, bio->bi_iter.bi_sector);
1701 submit_bio_noacct(bio);
1704 * Drop the extra reference count for non-POLLED bio, and hold one
1705 * reference for POLLED bio, which will be released in dm_poll_bio
1707 * Add every dm_io instance into the dm_io list head which is stored
1708 * in bio->bi_private, so that dm_poll_bio can poll them all.
1710 if (error || !ci.submit_as_polled) {
1712 * In case of submission failure, the extra reference for
1713 * submitting io isn't consumed yet
1716 atomic_dec(&io->io_count);
1717 dm_io_dec_pending(io, error);
1719 dm_queue_poll_io(bio, io);
1722 static void dm_submit_bio(struct bio *bio)
1724 struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
1726 struct dm_table *map;
1727 unsigned bio_opf = bio->bi_opf;
1729 map = dm_get_live_table_bio(md, &srcu_idx, bio_opf);
1731 /* If suspended, or map not yet available, queue this IO for later */
1732 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
1734 if (bio->bi_opf & REQ_NOWAIT)
1735 bio_wouldblock_error(bio);
1736 else if (bio->bi_opf & REQ_RAHEAD)
1743 dm_split_and_process_bio(md, map, bio);
1745 dm_put_live_table_bio(md, srcu_idx, bio_opf);
1748 static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
1751 WARN_ON_ONCE(!dm_tio_is_normal(&io->tio));
1753 /* don't poll if the mapped io is done */
1754 if (atomic_read(&io->io_count) > 1)
1755 bio_poll(&io->tio.clone, iob, flags);
1757 /* bio_poll holds the last reference */
1758 return atomic_read(&io->io_count) == 1;
1761 static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
1764 struct dm_io **head = dm_poll_list_head(bio);
1765 struct dm_io *list = *head;
1766 struct dm_io *tmp = NULL;
1767 struct dm_io *curr, *next;
1769 /* Only poll normal bio which was marked as REQ_DM_POLL_LIST */
1770 if (!(bio->bi_opf & REQ_DM_POLL_LIST))
1773 WARN_ON_ONCE(!list);
1776 * Restore .bi_private before possibly completing dm_io.
1778 * bio_poll() is only possible once @bio has been completely
1779 * submitted via submit_bio_noacct()'s depth-first submission.
1780 * So there is no dm_queue_poll_io() race associated with
1781 * clearing REQ_DM_POLL_LIST here.
1783 bio->bi_opf &= ~REQ_DM_POLL_LIST;
1784 bio->bi_private = list->data;
1786 for (curr = list, next = curr->next; curr; curr = next, next =
1787 curr ? curr->next : NULL) {
1788 if (dm_poll_dm_io(curr, iob, flags)) {
1790 * clone_endio() has already occurred, so no
1791 * error handling is needed here.
1793 __dm_io_dec_pending(curr);
1802 bio->bi_opf |= REQ_DM_POLL_LIST;
1803 /* Reset bio->bi_private to dm_io list head */
1810 /*-----------------------------------------------------------------
1811 * An IDR is used to keep track of allocated minor numbers.
1812 *---------------------------------------------------------------*/
1813 static void free_minor(int minor)
1815 spin_lock(&_minor_lock);
1816 idr_remove(&_minor_idr, minor);
1817 spin_unlock(&_minor_lock);
1821 * See if the device with a specific minor # is free.
1823 static int specific_minor(int minor)
1827 if (minor >= (1 << MINORBITS))
1830 idr_preload(GFP_KERNEL);
1831 spin_lock(&_minor_lock);
1833 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
1835 spin_unlock(&_minor_lock);
1838 return r == -ENOSPC ? -EBUSY : r;
1842 static int next_free_minor(int *minor)
1846 idr_preload(GFP_KERNEL);
1847 spin_lock(&_minor_lock);
1849 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
1851 spin_unlock(&_minor_lock);
1859 static const struct block_device_operations dm_blk_dops;
1860 static const struct block_device_operations dm_rq_blk_dops;
1861 static const struct dax_operations dm_dax_ops;
1863 static void dm_wq_work(struct work_struct *work);
1865 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
1866 static void dm_queue_destroy_crypto_profile(struct request_queue *q)
1868 dm_destroy_crypto_profile(q->crypto_profile);
1871 #else /* CONFIG_BLK_INLINE_ENCRYPTION */
1873 static inline void dm_queue_destroy_crypto_profile(struct request_queue *q)
1876 #endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
1878 static void cleanup_mapped_device(struct mapped_device *md)
1881 destroy_workqueue(md->wq);
1882 dm_free_md_mempools(md->mempools);
1885 dax_remove_host(md->disk);
1886 kill_dax(md->dax_dev);
1887 put_dax(md->dax_dev);
1891 dm_cleanup_zoned_dev(md);
1893 spin_lock(&_minor_lock);
1894 md->disk->private_data = NULL;
1895 spin_unlock(&_minor_lock);
1896 if (dm_get_md_type(md) != DM_TYPE_NONE) {
1898 del_gendisk(md->disk);
1900 dm_queue_destroy_crypto_profile(md->queue);
1901 blk_cleanup_disk(md->disk);
1904 if (md->pending_io) {
1905 free_percpu(md->pending_io);
1906 md->pending_io = NULL;
1909 cleanup_srcu_struct(&md->io_barrier);
1911 mutex_destroy(&md->suspend_lock);
1912 mutex_destroy(&md->type_lock);
1913 mutex_destroy(&md->table_devices_lock);
1914 mutex_destroy(&md->swap_bios_lock);
1916 dm_mq_cleanup_mapped_device(md);
1920 * Allocate and initialise a blank device with a given minor.
1922 static struct mapped_device *alloc_dev(int minor)
1924 int r, numa_node_id = dm_get_numa_node();
1925 struct mapped_device *md;
1928 md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
1930 DMWARN("unable to allocate device, out of memory.");
1934 if (!try_module_get(THIS_MODULE))
1935 goto bad_module_get;
1937 /* get a minor number for the dev */
1938 if (minor == DM_ANY_MINOR)
1939 r = next_free_minor(&minor);
1941 r = specific_minor(minor);
1945 r = init_srcu_struct(&md->io_barrier);
1947 goto bad_io_barrier;
1949 md->numa_node_id = numa_node_id;
1950 md->init_tio_pdu = false;
1951 md->type = DM_TYPE_NONE;
1952 mutex_init(&md->suspend_lock);
1953 mutex_init(&md->type_lock);
1954 mutex_init(&md->table_devices_lock);
1955 spin_lock_init(&md->deferred_lock);
1956 atomic_set(&md->holders, 1);
1957 atomic_set(&md->open_count, 0);
1958 atomic_set(&md->event_nr, 0);
1959 atomic_set(&md->uevent_seq, 0);
1960 INIT_LIST_HEAD(&md->uevent_list);
1961 INIT_LIST_HEAD(&md->table_devices);
1962 spin_lock_init(&md->uevent_lock);
	 * Default to bio-based until the DM table is loaded and md->type is
	 * established. If a request-based table is loaded, blk-mq will
	 * override accordingly.
1969 md->disk = blk_alloc_disk(md->numa_node_id);
1972 md->queue = md->disk->queue;
1974 init_waitqueue_head(&md->wait);
1975 INIT_WORK(&md->work, dm_wq_work);
1976 init_waitqueue_head(&md->eventq);
1977 init_completion(&md->kobj_holder.completion);
1979 md->swap_bios = get_swap_bios();
1980 sema_init(&md->swap_bios_semaphore, md->swap_bios);
1981 mutex_init(&md->swap_bios_lock);
1983 md->disk->major = _major;
1984 md->disk->first_minor = minor;
1985 md->disk->minors = 1;
1986 md->disk->flags |= GENHD_FL_NO_PART;
1987 md->disk->fops = &dm_blk_dops;
1988 md->disk->queue = md->queue;
1989 md->disk->private_data = md;
1990 sprintf(md->disk->disk_name, "dm-%d", minor);
1992 if (IS_ENABLED(CONFIG_FS_DAX)) {
1993 md->dax_dev = alloc_dax(md, &dm_dax_ops);
1994 if (IS_ERR(md->dax_dev)) {
1998 set_dax_nocache(md->dax_dev);
1999 set_dax_nomc(md->dax_dev);
2000 if (dax_add_host(md->dax_dev, md->disk))
2004 format_dev_t(md->name, MKDEV(_major, minor));
2006 md->wq = alloc_workqueue("kdmflush/%s", WQ_MEM_RECLAIM, 0, md->name);
2010 md->pending_io = alloc_percpu(unsigned long);
2011 if (!md->pending_io)
2014 dm_stats_init(&md->stats);
2016 /* Populate the mapping, nobody knows we exist yet */
2017 spin_lock(&_minor_lock);
2018 old_md = idr_replace(&_minor_idr, md, minor);
2019 spin_unlock(&_minor_lock);
2021 BUG_ON(old_md != MINOR_ALLOCED);
2026 cleanup_mapped_device(md);
2030 module_put(THIS_MODULE);
2036 static void unlock_fs(struct mapped_device *md);
2038 static void free_dev(struct mapped_device *md)
2040 int minor = MINOR(disk_devt(md->disk));
2044 cleanup_mapped_device(md);
2046 free_table_devices(&md->table_devices);
2047 dm_stats_cleanup(&md->stats);
2050 module_put(THIS_MODULE);
2055 * Bind a table to the device.
2057 static void event_callback(void *context)
2059 unsigned long flags;
2061 struct mapped_device *md = (struct mapped_device *) context;
2063 spin_lock_irqsave(&md->uevent_lock, flags);
2064 list_splice_init(&md->uevent_list, &uevents);
2065 spin_unlock_irqrestore(&md->uevent_lock, flags);
2067 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
2069 atomic_inc(&md->event_nr);
2070 wake_up(&md->eventq);
2071 dm_issue_global_event();
2075 * Returns old map, which caller must destroy.
2077 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2078 struct queue_limits *limits)
2080 struct dm_table *old_map;
2084 lockdep_assert_held(&md->suspend_lock);
2086 size = dm_table_get_size(t);
2089 * Wipe any geometry if the size of the table changed.
2091 if (size != dm_get_size(md))
2092 memset(&md->geometry, 0, sizeof(md->geometry));
2094 if (!get_capacity(md->disk))
2095 set_capacity(md->disk, size);
2097 set_capacity_and_notify(md->disk, size);
2099 dm_table_event_callback(t, event_callback, md);
2101 if (dm_table_request_based(t)) {
2103 * Leverage the fact that request-based DM targets are
2104 * immutable singletons - used to optimize dm_mq_queue_rq.
2106 md->immutable_target = dm_table_get_immutable_target(t);
2109 * There is no need to reload with request-based dm because the
2110 * size of front_pad doesn't change.
2112 * Note for future: If you are to reload bioset, prep-ed
2113 * requests in the queue may refer to bio from the old bioset,
2114 * so you must walk through the queue to unprep.
2116 if (!md->mempools) {
2117 md->mempools = t->mempools;
2122 * The md may already have mempools that need changing.
2123 * If so, reload bioset because front_pad may have changed
2124 * because a different table was loaded.
2126 dm_free_md_mempools(md->mempools);
2127 md->mempools = t->mempools;
2131 ret = dm_table_set_restrictions(t, md->queue, limits);
2133 old_map = ERR_PTR(ret);
2137 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2138 rcu_assign_pointer(md->map, (void *)t);
2139 md->immutable_target_type = dm_table_get_immutable_target_type(t);
2148 * Returns unbound table for the caller to free.
2150 static struct dm_table *__unbind(struct mapped_device *md)
2152 struct dm_table *map = rcu_dereference_protected(md->map, 1);
2157 dm_table_event_callback(map, NULL, NULL);
2158 RCU_INIT_POINTER(md->map, NULL);
2165 * Constructor for a new device.
2167 int dm_create(int minor, struct mapped_device **result)
2169 struct mapped_device *md;
2171 md = alloc_dev(minor);
2175 dm_ima_reset_data(md);
2182 * Functions to manage md->type.
2183 * All are required to hold md->type_lock.
2185 void dm_lock_md_type(struct mapped_device *md)
2187 mutex_lock(&md->type_lock);
2190 void dm_unlock_md_type(struct mapped_device *md)
2192 mutex_unlock(&md->type_lock);
2195 void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
2197 BUG_ON(!mutex_is_locked(&md->type_lock));
2201 enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
2206 struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2208 return md->immutable_target_type;
 * The queue_limits are only valid as long as you have a reference
 * count on md.
2215 struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
2217 BUG_ON(!atomic_read(&md->holders));
2218 return &md->queue->limits;
2220 EXPORT_SYMBOL_GPL(dm_get_queue_limits);
 * Set up the DM device's queue based on md's type
2225 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
2227 enum dm_queue_mode type = dm_table_get_type(t);
2228 struct queue_limits limits;
2232 case DM_TYPE_REQUEST_BASED:
2233 md->disk->fops = &dm_rq_blk_dops;
2234 r = dm_mq_init_request_queue(md, t);
2236 DMERR("Cannot initialize queue for request-based dm mapped device");
2240 case DM_TYPE_BIO_BASED:
2241 case DM_TYPE_DAX_BIO_BASED:
2248 r = dm_calculate_queue_limits(t, &limits);
2250 DMERR("Cannot calculate initial queue limits");
2253 r = dm_table_set_restrictions(t, md->queue, &limits);
2257 r = add_disk(md->disk);
2261 r = dm_sysfs_init(md);
2263 del_gendisk(md->disk);
2270 struct mapped_device *dm_get_md(dev_t dev)
2272 struct mapped_device *md;
2273 unsigned minor = MINOR(dev);
2275 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2278 spin_lock(&_minor_lock);
2280 md = idr_find(&_minor_idr, minor);
2281 if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
2282 test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2288 spin_unlock(&_minor_lock);
2292 EXPORT_SYMBOL_GPL(dm_get_md);
2294 void *dm_get_mdptr(struct mapped_device *md)
2296 return md->interface_ptr;
2299 void dm_set_mdptr(struct mapped_device *md, void *ptr)
2301 md->interface_ptr = ptr;
2304 void dm_get(struct mapped_device *md)
2306 atomic_inc(&md->holders);
2307 BUG_ON(test_bit(DMF_FREEING, &md->flags));
2310 int dm_hold(struct mapped_device *md)
2312 spin_lock(&_minor_lock);
2313 if (test_bit(DMF_FREEING, &md->flags)) {
2314 spin_unlock(&_minor_lock);
2318 spin_unlock(&_minor_lock);
2321 EXPORT_SYMBOL_GPL(dm_hold);
2323 const char *dm_device_name(struct mapped_device *md)
2327 EXPORT_SYMBOL_GPL(dm_device_name);
2329 static void __dm_destroy(struct mapped_device *md, bool wait)
2331 struct dm_table *map;
2336 spin_lock(&_minor_lock);
2337 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2338 set_bit(DMF_FREEING, &md->flags);
2339 spin_unlock(&_minor_lock);
2341 blk_mark_disk_dead(md->disk);
2344 * Take suspend_lock so that presuspend and postsuspend methods
2345 * do not race with internal suspend.
2347 mutex_lock(&md->suspend_lock);
2348 map = dm_get_live_table(md, &srcu_idx);
2349 if (!dm_suspended_md(md)) {
2350 dm_table_presuspend_targets(map);
2351 set_bit(DMF_SUSPENDED, &md->flags);
2352 set_bit(DMF_POST_SUSPENDING, &md->flags);
2353 dm_table_postsuspend_targets(map);
2355 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
2356 dm_put_live_table(md, srcu_idx);
2357 mutex_unlock(&md->suspend_lock);
	 * Rare, but there may be I/O requests still going to complete,
	 * for example. Wait for all references to disappear.
	 * No one should increment the reference count of the mapped_device
	 * after its state becomes DMF_FREEING.
2366 while (atomic_read(&md->holders))
2368 else if (atomic_read(&md->holders))
2369 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2370 dm_device_name(md), atomic_read(&md->holders));
2372 dm_table_destroy(__unbind(md));
2376 void dm_destroy(struct mapped_device *md)
2378 __dm_destroy(md, true);
2381 void dm_destroy_immediate(struct mapped_device *md)
2383 __dm_destroy(md, false);
2386 void dm_put(struct mapped_device *md)
2388 atomic_dec(&md->holders);
2390 EXPORT_SYMBOL_GPL(dm_put);
2392 static bool dm_in_flight_bios(struct mapped_device *md)
2395 unsigned long sum = 0;
2397 for_each_possible_cpu(cpu)
2398 sum += *per_cpu_ptr(md->pending_io, cpu);
2403 static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int task_state)
2409 prepare_to_wait(&md->wait, &wait, task_state);
2411 if (!dm_in_flight_bios(md))
2414 if (signal_pending_state(task_state, current)) {
2421 finish_wait(&md->wait, &wait);
2428 static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_state)
2432 if (!queue_is_mq(md->queue))
2433 return dm_wait_for_bios_completion(md, task_state);
2436 if (!blk_mq_queue_inflight(md->queue))
2439 if (signal_pending_state(task_state, current)) {
2451 * Process the deferred bios
2453 static void dm_wq_work(struct work_struct *work)
2455 struct mapped_device *md = container_of(work, struct mapped_device, work);
2458 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2459 spin_lock_irq(&md->deferred_lock);
2460 bio = bio_list_pop(&md->deferred);
2461 spin_unlock_irq(&md->deferred_lock);
2466 submit_bio_noacct(bio);
2470 static void dm_queue_flush(struct mapped_device *md)
2472 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2473 smp_mb__after_atomic();
2474 queue_work(md->wq, &md->work);
2478 * Swap in a new table, returning the old one for the caller to destroy.
2480 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
2482 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
2483 struct queue_limits limits;
2486 mutex_lock(&md->suspend_lock);
2488 /* device must be suspended */
2489 if (!dm_suspended_md(md))
	 * If the new table has no data devices, retain the existing limits.
	 * This helps multipath with queue_if_no_path if all paths disappear,
	 * then new I/O is queued based on these limits, and then some paths
	 * reappear.
2499 live_map = dm_get_live_table_fast(md);
2501 limits = md->queue->limits;
2502 dm_put_live_table_fast(md);
2506 r = dm_calculate_queue_limits(table, &limits);
2513 map = __bind(md, table, &limits);
2514 dm_issue_global_event();
2517 mutex_unlock(&md->suspend_lock);
 * Functions to lock and unlock any filesystem running on the
 * device.
2529 WARN_ON(test_bit(DMF_FROZEN, &md->flags));
2531 r = freeze_bdev(md->disk->part0);
2533 set_bit(DMF_FROZEN, &md->flags);
2537 static void unlock_fs(struct mapped_device *md)
2539 if (!test_bit(DMF_FROZEN, &md->flags))
2541 thaw_bdev(md->disk->part0);
2542 clear_bit(DMF_FROZEN, &md->flags);
2546 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
2547 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
2548 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
2550 * If __dm_suspend returns 0, the device is completely quiescent
2551 * now. There is no request-processing activity. All new requests
2552 * are being added to md->deferred list.
2554 static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
2555 unsigned suspend_flags, unsigned int task_state,
2556 int dmf_suspended_flag)
2558 bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
2559 bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
2562 lockdep_assert_held(&md->suspend_lock);
2565 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2566 * This flag is cleared before dm_suspend returns.
2569 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2571 DMDEBUG("%s: suspending with flush", dm_device_name(md));
2574 * This gets reverted if there's an error later and the targets
2575 * provide the .presuspend_undo hook.
2577 dm_table_presuspend_targets(map);
2580 * Flush I/O to the device.
2581 * Any I/O submitted after lock_fs() may not be flushed.
2582 * noflush takes precedence over do_lockfs.
2583 * (lock_fs() flushes I/Os and waits for them to complete.)
2585 if (!noflush && do_lockfs) {
2588 dm_table_presuspend_undo_targets(map);
2594 * Here we must make sure that no processes are submitting requests
2595 * to target drivers i.e. no one may be executing
2596 * dm_split_and_process_bio from dm_submit_bio.
2598 * To get all processes out of dm_split_and_process_bio in dm_submit_bio,
2599 * we take the write lock. To prevent any process from reentering
2600 * dm_split_and_process_bio from dm_submit_bio and quiesce the thread
2601 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
2602 * flush_workqueue(md->wq).
2604 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2606 synchronize_srcu(&md->io_barrier);
2609 * Stop md->queue before flushing md->wq in case request-based
2610 * dm defers requests to md->wq from md->queue.
2612 if (dm_request_based(md))
2613 dm_stop_queue(md->queue);
2615 flush_workqueue(md->wq);
2618 * At this point no more requests are entering target request routines.
2619  * We call dm_wait_for_completion to wait for all existing requests to finish.
2622 r = dm_wait_for_completion(md, task_state);
2624 set_bit(dmf_suspended_flag, &md->flags);
2627 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2629 synchronize_srcu(&md->io_barrier);
2631 	/* were we interrupted? */
2635 if (dm_request_based(md))
2636 dm_start_queue(md->queue);
2639 dm_table_presuspend_undo_targets(map);
2640 /* pushback list is already flushed, so skip flush */
2647  * We need to be able to change a mapping table under a mounted
2648  * filesystem. For example, we might want to move some data in
2649  * the background. Before the table can be swapped with
2650  * dm_bind_table, dm_suspend must be called to flush any in-flight
2651  * bios and ensure that any further I/O gets deferred.
2654 * Suspend mechanism in request-based dm.
2656 * 1. Flush all I/Os by lock_fs() if needed.
2657 * 2. Stop dispatching any I/O by stopping the request_queue.
2658 * 3. Wait for all in-flight I/Os to be completed or requeued.
2660 * To abort suspend, start the request_queue.
2662 int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2664 struct dm_table *map = NULL;
2668 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2670 if (dm_suspended_md(md)) {
2675 if (dm_suspended_internally_md(md)) {
2676 /* already internally suspended, wait for internal resume */
2677 mutex_unlock(&md->suspend_lock);
2678 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2684 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2686 r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
2690 set_bit(DMF_POST_SUSPENDING, &md->flags);
2691 dm_table_postsuspend_targets(map);
2692 clear_bit(DMF_POST_SUSPENDING, &md->flags);
2695 mutex_unlock(&md->suspend_lock);
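/*
 * Illustrative sketch only (hypothetical caller): the two common ways to
 * drive dm_suspend().  With DM_SUSPEND_LOCKFS_FLAG outstanding I/O is
 * flushed and the filesystem frozen via lock_fs(); with
 * DM_SUSPEND_NOFLUSH_FLAG in-flight I/O is deferred/requeued instead, and
 * noflush takes precedence over lockfs in __dm_suspend().
 */
static int __maybe_unused example_suspend_modes(struct mapped_device *md,
						bool flush)
{
	unsigned suspend_flags = flush ? DM_SUSPEND_LOCKFS_FLAG
				       : DM_SUSPEND_NOFLUSH_FLAG;

	return dm_suspend(md, suspend_flags);
}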
2699 static int __dm_resume(struct mapped_device *md, struct dm_table *map)
2702 int r = dm_table_resume_targets(map);
2710 * Flushing deferred I/Os must be done after targets are resumed
2711 * so that mapping of targets can work correctly.
2712 * Request-based dm is queueing the deferred I/Os in its request_queue.
2714 if (dm_request_based(md))
2715 dm_start_queue(md->queue);
2722 int dm_resume(struct mapped_device *md)
2725 struct dm_table *map = NULL;
2729 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2731 if (!dm_suspended_md(md))
2734 if (dm_suspended_internally_md(md)) {
2735 /* already internally suspended, wait for internal resume */
2736 mutex_unlock(&md->suspend_lock);
2737 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2743 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2744 if (!map || !dm_table_get_size(map))
2747 r = __dm_resume(md, map);
2751 clear_bit(DMF_SUSPENDED, &md->flags);
2753 mutex_unlock(&md->suspend_lock);
2759 * Internal suspend/resume works like userspace-driven suspend. It waits
2760 * until all bios finish and prevents issuing new bios to the target drivers.
2761 * It may be used only from the kernel.
2764 static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
2766 struct dm_table *map = NULL;
2768 lockdep_assert_held(&md->suspend_lock);
2770 if (md->internal_suspend_count++)
2771 return; /* nested internal suspend */
2773 if (dm_suspended_md(md)) {
2774 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2775 return; /* nest suspend */
2778 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2781 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
2782 * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend
2783 * would require changing .presuspend to return an error -- avoid this
2784 * until there is a need for more elaborate variants of internal suspend.
2786 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
2787 DMF_SUSPENDED_INTERNALLY);
2789 set_bit(DMF_POST_SUSPENDING, &md->flags);
2790 dm_table_postsuspend_targets(map);
2791 clear_bit(DMF_POST_SUSPENDING, &md->flags);
2794 static void __dm_internal_resume(struct mapped_device *md)
2796 BUG_ON(!md->internal_suspend_count);
2798 if (--md->internal_suspend_count)
2799 return; /* resume from nested internal suspend */
2801 if (dm_suspended_md(md))
2802 goto done; /* resume from nested suspend */
2805 * NOTE: existing callers don't need to call dm_table_resume_targets
2806 * (which may fail -- so best to avoid it for now by passing NULL map)
2808 (void) __dm_resume(md, NULL);
2811 clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2812 smp_mb__after_atomic();
2813 wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
2816 void dm_internal_suspend_noflush(struct mapped_device *md)
2818 mutex_lock(&md->suspend_lock);
2819 __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
2820 mutex_unlock(&md->suspend_lock);
2822 EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
2824 void dm_internal_resume(struct mapped_device *md)
2826 mutex_lock(&md->suspend_lock);
2827 __dm_internal_resume(md);
2828 mutex_unlock(&md->suspend_lock);
2830 EXPORT_SYMBOL_GPL(dm_internal_resume);
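/*
 * Illustrative sketch only: internal suspend/resume is kernel-driven and
 * nests via md->internal_suspend_count, so a paired noflush suspend/resume
 * may safely bracket kernel-side work even if an outer layer has already
 * suspended the device.  The function name and the bracketed operation are
 * hypothetical.
 */
static void __maybe_unused example_internal_quiesce(struct mapped_device *md)
{
	dm_internal_suspend_noflush(md);	/* new bios are deferred from here on */

	/* ... perform kernel-side work that needs a quiesced device ... */

	dm_internal_resume(md);			/* deferred bios are resubmitted */
}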
2833 * Fast variants of internal suspend/resume hold md->suspend_lock,
2834 * which prevents interaction with userspace-driven suspend.
2837 void dm_internal_suspend_fast(struct mapped_device *md)
2839 mutex_lock(&md->suspend_lock);
2840 if (dm_suspended_md(md) || dm_suspended_internally_md(md))
2843 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2844 synchronize_srcu(&md->io_barrier);
2845 flush_workqueue(md->wq);
2846 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2848 EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
2850 void dm_internal_resume_fast(struct mapped_device *md)
2852 if (dm_suspended_md(md) || dm_suspended_internally_md(md))
2858 mutex_unlock(&md->suspend_lock);
2860 EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
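/*
 * Illustrative sketch only: the _fast variants keep md->suspend_lock held
 * between the two calls, so a userspace-driven suspend cannot interleave
 * with the bracketed operation.  Hypothetical caller shown.
 */
static void __maybe_unused example_fast_quiesce(struct mapped_device *md)
{
	dm_internal_suspend_fast(md);	/* takes md->suspend_lock */

	/* ... short bookkeeping while I/O is quiesced ... */

	dm_internal_resume_fast(md);	/* releases md->suspend_lock */
}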
2862 /*-----------------------------------------------------------------
2863 * Event notification.
2864 *---------------------------------------------------------------*/
2865 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
2870 char udev_cookie[DM_COOKIE_LENGTH];
2871 char *envp[] = { udev_cookie, NULL };
2873 noio_flag = memalloc_noio_save();
2876 r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
2878 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
2879 DM_COOKIE_ENV_VAR_NAME, cookie);
2880 r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
2884 memalloc_noio_restore(noio_flag);
2889 uint32_t dm_next_uevent_seq(struct mapped_device *md)
2891 return atomic_add_return(1, &md->uevent_seq);
2894 uint32_t dm_get_event_nr(struct mapped_device *md)
2896 return atomic_read(&md->event_nr);
2899 int dm_wait_event(struct mapped_device *md, int event_nr)
2901 return wait_event_interruptible(md->eventq,
2902 (event_nr != atomic_read(&md->event_nr)));
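/*
 * Illustrative sketch only: the intended polling pattern for table events.
 * A waiter samples the current event number, acts on the current state, and
 * then blocks in dm_wait_event() until the number changes.  The helper name
 * is hypothetical.
 */
static int __maybe_unused example_wait_for_next_event(struct mapped_device *md)
{
	int event_nr = dm_get_event_nr(md);

	/* ... act on the current device state ... */

	return dm_wait_event(md, event_nr);	/* <0 if interrupted by a signal */
}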
2905 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
2907 unsigned long flags;
2909 spin_lock_irqsave(&md->uevent_lock, flags);
2910 list_add(elist, &md->uevent_list);
2911 spin_unlock_irqrestore(&md->uevent_lock, flags);
2915  * The gendisk is only valid as long as you have a reference count on 'md'.
2918 struct gendisk *dm_disk(struct mapped_device *md)
2922 EXPORT_SYMBOL_GPL(dm_disk);
2924 struct kobject *dm_kobject(struct mapped_device *md)
2926 return &md->kobj_holder.kobj;
2929 struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2931 struct mapped_device *md;
2933 md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
2935 spin_lock(&_minor_lock);
2936 if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2942 spin_unlock(&_minor_lock);
2947 int dm_suspended_md(struct mapped_device *md)
2949 return test_bit(DMF_SUSPENDED, &md->flags);
2952 static int dm_post_suspending_md(struct mapped_device *md)
2954 return test_bit(DMF_POST_SUSPENDING, &md->flags);
2957 int dm_suspended_internally_md(struct mapped_device *md)
2959 return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2962 int dm_test_deferred_remove_flag(struct mapped_device *md)
2964 return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
2967 int dm_suspended(struct dm_target *ti)
2969 return dm_suspended_md(ti->table->md);
2971 EXPORT_SYMBOL_GPL(dm_suspended);
2973 int dm_post_suspending(struct dm_target *ti)
2975 return dm_post_suspending_md(ti->table->md);
2977 EXPORT_SYMBOL_GPL(dm_post_suspending);
2979 int dm_noflush_suspending(struct dm_target *ti)
2981 return __noflush_suspending(ti->table->md);
2983 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
2985 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
2986 unsigned per_io_data_size, unsigned min_pool_size,
2987 bool integrity, bool poll)
2989 struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
2990 unsigned int pool_size = 0;
2991 unsigned int front_pad, io_front_pad;
2998 case DM_TYPE_BIO_BASED:
2999 case DM_TYPE_DAX_BIO_BASED:
3000 pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
3001 front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET;
3002 io_front_pad = roundup(per_io_data_size, __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
3003 ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, poll ? BIOSET_PERCPU_CACHE : 0);
3006 if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
3009 case DM_TYPE_REQUEST_BASED:
3010 pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
3011 front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
3012 /* per_io_data_size is used for blk-mq pdu at queue allocation */
3018 ret = bioset_init(&pools->bs, pool_size, front_pad, 0);
3022 if (integrity && bioset_integrity_create(&pools->bs, pool_size))
3028 dm_free_md_mempools(pools);
3033 void dm_free_md_mempools(struct dm_md_mempools *pools)
3038 bioset_exit(&pools->bs);
3039 bioset_exit(&pools->io_bs);
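/*
 * Illustrative sketch only (not used by the driver): how the bio-based
 * front pads computed in dm_alloc_md_mempools() are derived.  Each clone
 * bio allocated from pools->bs (or pools->io_bs) is preceded in memory by
 * the target's per-bio data and the containing dm_target_io (or dm_io), so
 * the pad is the per-io data size rounded up to the container's alignment
 * plus the bio's offset inside that container.  The helper name is
 * hypothetical.
 */
static unsigned int __maybe_unused
example_bio_front_pad(unsigned int per_io_data_size)
{
	return roundup(per_io_data_size, __alignof__(struct dm_target_io)) +
	       DM_TARGET_IO_BIO_OFFSET;
}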
3051 static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
3054 struct mapped_device *md = bdev->bd_disk->private_data;
3055 struct dm_table *table;
3056 struct dm_target *ti;
3057 int ret = -ENOTTY, srcu_idx;
3059 table = dm_get_live_table(md, &srcu_idx);
3060 if (!table || !dm_table_get_size(table))
3063 /* We only support devices that have a single target */
3064 if (dm_table_get_num_targets(table) != 1)
3066 ti = dm_table_get_target(table, 0);
3068 if (dm_suspended_md(md)) {
3074 if (!ti->type->iterate_devices)
3077 ret = ti->type->iterate_devices(ti, fn, data);
3079 dm_put_live_table(md, srcu_idx);
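/*
 * Illustrative sketch only: dm_call_pr() resolves the single target behind
 * the mapped device and hands a callback of type iterate_devices_callout_fn
 * to ->iterate_devices(), which invokes it once per underlying path.  The
 * callback below is hypothetical and merely counts the paths it is given.
 */
static int __maybe_unused example_count_path(struct dm_target *ti,
					     struct dm_dev *dev,
					     sector_t start, sector_t len,
					     void *data)
{
	unsigned int *count = data;

	(*count)++;	/* one invocation per underlying device */
	return 0;	/* an error return is propagated back via dm_call_pr() */
}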
3084 * For register / unregister we need to manually call out to every path.
3086 static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
3087 sector_t start, sector_t len, void *data)
3089 struct dm_pr *pr = data;
3090 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3092 if (!ops || !ops->pr_register)
3094 return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
3097 static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
3108 ret = dm_call_pr(bdev, __dm_pr_register, &pr);
3109 if (ret && new_key) {
3110 /* unregister all paths if we failed to register any path */
3111 pr.old_key = new_key;
3114 pr.fail_early = false;
3115 dm_call_pr(bdev, __dm_pr_register, &pr);
3121 static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
3124 struct mapped_device *md = bdev->bd_disk->private_data;
3125 const struct pr_ops *ops;
3128 r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3132 ops = bdev->bd_disk->fops->pr_ops;
3133 if (ops && ops->pr_reserve)
3134 r = ops->pr_reserve(bdev, key, type, flags);
3138 dm_unprepare_ioctl(md, srcu_idx);
3142 static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
3144 struct mapped_device *md = bdev->bd_disk->private_data;
3145 const struct pr_ops *ops;
3148 r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3152 ops = bdev->bd_disk->fops->pr_ops;
3153 if (ops && ops->pr_release)
3154 r = ops->pr_release(bdev, key, type);
3158 dm_unprepare_ioctl(md, srcu_idx);
3162 static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
3163 enum pr_type type, bool abort)
3165 struct mapped_device *md = bdev->bd_disk->private_data;
3166 const struct pr_ops *ops;
3169 r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3173 ops = bdev->bd_disk->fops->pr_ops;
3174 if (ops && ops->pr_preempt)
3175 r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
3179 dm_unprepare_ioctl(md, srcu_idx);
3183 static int dm_pr_clear(struct block_device *bdev, u64 key)
3185 struct mapped_device *md = bdev->bd_disk->private_data;
3186 const struct pr_ops *ops;
3189 r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3193 ops = bdev->bd_disk->fops->pr_ops;
3194 if (ops && ops->pr_clear)
3195 r = ops->pr_clear(bdev, key);
3199 dm_unprepare_ioctl(md, srcu_idx);
3203 static const struct pr_ops dm_pr_ops = {
3204 .pr_register = dm_pr_register,
3205 .pr_reserve = dm_pr_reserve,
3206 .pr_release = dm_pr_release,
3207 .pr_preempt = dm_pr_preempt,
3208 .pr_clear = dm_pr_clear,
3211 static const struct block_device_operations dm_blk_dops = {
3212 .submit_bio = dm_submit_bio,
3213 .poll_bio = dm_poll_bio,
3214 .open = dm_blk_open,
3215 .release = dm_blk_close,
3216 .ioctl = dm_blk_ioctl,
3217 .getgeo = dm_blk_getgeo,
3218 .report_zones = dm_blk_report_zones,
3219 .pr_ops = &dm_pr_ops,
3220 .owner = THIS_MODULE
3223 static const struct block_device_operations dm_rq_blk_dops = {
3224 .open = dm_blk_open,
3225 .release = dm_blk_close,
3226 .ioctl = dm_blk_ioctl,
3227 .getgeo = dm_blk_getgeo,
3228 .pr_ops = &dm_pr_ops,
3229 .owner = THIS_MODULE
3232 static const struct dax_operations dm_dax_ops = {
3233 .direct_access = dm_dax_direct_access,
3234 .zero_page_range = dm_dax_zero_page_range,
3235 .recovery_write = dm_dax_recovery_write,
3241 module_init(dm_init);
3242 module_exit(dm_exit);
3244 module_param(major, uint, 0);
3245 MODULE_PARM_DESC(major, "The major number of the device mapper");
3247 module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
3248 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
3250 module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
3251 MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
3253 module_param(swap_bios, int, S_IRUGO | S_IWUSR);
3254 MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");
3256 MODULE_DESCRIPTION(DM_NAME " driver");
3257 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
3258 MODULE_LICENSE("GPL");