 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 * This file is released under the GPL.

#include <linux/module.h>

#define DM_MSG_PREFIX "zoned"
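/*
 * Minimum number of BIOs reserved in the bio_set used to clone target BIOs
 * (passed to bioset_init() in dmz_ctr()).
 */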
#define DMZ_MIN_BIOS 8192

        struct dmz_target *target;

 * Chunk work descriptor.
struct dm_chunk_work {
        struct work_struct work;
        struct dmz_target *target;
        struct bio_list bio_list;

        /* Zoned block device information */

        /* For metadata handling */
        struct dmz_metadata *metadata;

        struct dmz_reclaim *reclaim;

        struct radix_tree_root chunk_rxtree;
        struct workqueue_struct *chunk_wq;
        struct mutex chunk_lock;

        /* For cloned BIOs to zones */
        struct bio_set bio_set;

        spinlock_t flush_lock;
        struct bio_list flush_list;
        struct delayed_work flush_work;
        struct workqueue_struct *flush_wq;
 * Flush period (in jiffies).
#define DMZ_FLUSH_PERIOD (10 * HZ)
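/* For example, with HZ == 250 this evaluates to 2500 jiffies, i.e. 10 seconds. */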
 * Target BIO completion.
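 *
 * The BIO context (struct dmz_bioctx) carries a reference count: dmz_map()
 * initializes it to 1 and each clone issued by dmz_submit_bio() takes an
 * extra reference, so the target BIO completes only once all of its clones
 * have completed.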
static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
        struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));

        if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
                bio->bi_status = status;
        if (bio->bi_status != BLK_STS_OK)
                bioctx->target->dev->flags |= DMZ_CHECK_BDEV;

        if (atomic_dec_and_test(&bioctx->ref)) {
                struct dm_zone *zone = bioctx->zone;

                if (bio->bi_status != BLK_STS_OK &&
                    bio_op(bio) == REQ_OP_WRITE &&
                        set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
                dmz_deactivate_zone(zone);

 * Completion callback for an internally cloned target BIO. This terminates the
 * target BIO when there are no more references to its context.
static void dmz_clone_endio(struct bio *clone)
        struct dmz_bioctx *bioctx = clone->bi_private;
        blk_status_t status = clone->bi_status;

        dmz_bio_endio(bioctx->bio, status);
 * Issue a clone of a target BIO. The clone may only partially process the
 * original target BIO.
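 * bio_advance() moves the target BIO past the cloned region so that the
 * caller can map any remaining blocks with further clones.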
static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
                          struct bio *bio, sector_t chunk_block,
                          unsigned int nr_blocks)
        struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
        clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
        if (!clone)
                return -ENOMEM;
        bio_set_dev(clone, dmz->dev->bdev);
        clone->bi_iter.bi_sector =
                dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
        clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
        clone->bi_end_io = dmz_clone_endio;
        clone->bi_private = bioctx;

        bio_advance(bio, clone->bi_iter.bi_size);

        atomic_inc(&bioctx->ref);
        generic_make_request(clone);

        if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
                zone->wp_block += nr_blocks;
 * Zero out pages of discarded blocks accessed by a read BIO.
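 * The BIO size is temporarily swapped down to cover only the range being
 * cleared, so that just the pages of those blocks are zero-filled.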
static void dmz_handle_read_zero(struct dmz_target *dmz, struct bio *bio,
                                 sector_t chunk_block, unsigned int nr_blocks)
        unsigned int size = nr_blocks << DMZ_BLOCK_SHIFT;

        /* Clear nr_blocks */
        swap(bio->bi_iter.bi_size, size);
        zero_fill_bio(bio);
        swap(bio->bi_iter.bi_size, size);
        bio_advance(bio, size);

 * Process a read BIO.
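 *
 * Blocks are read from the data zone where valid, from the buffer zone when
 * one is mapped and holds the valid copy, and are zero-filled when no valid
 * copy exists (never written, or discarded).
 *
 * Chunk/block arithmetic (illustrative, assuming the usual power-of-two zone
 * size): with 65536 blocks per zone, target block 70000 falls in chunk 1 at
 * chunk_block 4464.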
static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
        sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
        unsigned int nr_blocks = dmz_bio_blocks(bio);
        sector_t end_block = chunk_block + nr_blocks;
        struct dm_zone *rzone, *bzone;
        /* A read to an unmapped chunk only needs to zero the BIO buffer */
        dmz_dev_debug(dmz->dev, "READ chunk %llu -> %s zone %u, block %llu, %u blocks",
                      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
                      (dmz_is_rnd(zone) ? "RND" : "SEQ"),
                      dmz_id(dmz->metadata, zone),
                      (unsigned long long)chunk_block, nr_blocks);

        /* Check block validity to determine the read location */
        while (chunk_block < end_block) {
                if (dmz_is_rnd(zone) || chunk_block < zone->wp_block) {
                        /* Test block validity in the data zone */
                        ret = dmz_block_valid(dmz->metadata, zone, chunk_block);
                        /* Read data zone blocks */

                 * No valid blocks found in the data zone.
                 * Check the buffer zone, if there is one.
                if (!nr_blocks && bzone) {
                        ret = dmz_block_valid(dmz->metadata, bzone, chunk_block);
                        /* Read buffer zone blocks */

                /* Valid blocks found: read them */
                nr_blocks = min_t(unsigned int, nr_blocks, end_block - chunk_block);
                ret = dmz_submit_bio(dmz, rzone, bio, chunk_block, nr_blocks);
                chunk_block += nr_blocks;
                /* No valid block: zero out the current BIO block */
                dmz_handle_read_zero(dmz, bio, chunk_block, 1);
 * Write blocks directly in a data zone, at the write pointer.
 * If a buffer zone is assigned, invalidate the blocks written in place.
static int dmz_handle_direct_write(struct dmz_target *dmz,
                                   struct dm_zone *zone, struct bio *bio,
                                   sector_t chunk_block,
                                   unsigned int nr_blocks)
        struct dmz_metadata *zmd = dmz->metadata;
        struct dm_zone *bzone = zone->bzone;

        if (dmz_is_readonly(zone))

        ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);

         * Validate the blocks in the data zone and invalidate
         * in the buffer zone, if there is one.
        ret = dmz_validate_blocks(zmd, zone, chunk_block, nr_blocks);
        if (ret == 0 && bzone)
                ret = dmz_invalidate_blocks(zmd, bzone, chunk_block, nr_blocks);

 * Write blocks in the buffer zone of @zone.
 * If no buffer zone is assigned yet, get one.
 * Called with @zone write locked.
static int dmz_handle_buffered_write(struct dmz_target *dmz,
                                     struct dm_zone *zone, struct bio *bio,
                                     sector_t chunk_block,
                                     unsigned int nr_blocks)
        struct dmz_metadata *zmd = dmz->metadata;
        struct dm_zone *bzone;

        /* Get the buffer zone. One will be allocated if needed */
        bzone = dmz_get_chunk_buffer(zmd, zone);
                return PTR_ERR(bzone);

        if (dmz_is_readonly(bzone))

        ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks);

         * Validate the blocks in the buffer zone
         * and invalidate in the data zone.
        ret = dmz_validate_blocks(zmd, bzone, chunk_block, nr_blocks);
        if (ret == 0 && chunk_block < zone->wp_block)
                ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);
 * Process a write BIO.
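 *
 * Writes to a random zone, and writes that line up with the write pointer of
 * a sequential zone, go straight to the data zone. Unaligned writes to a
 * sequential zone are redirected to a buffer zone and merged back into the
 * data zone later by reclaim.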
static int dmz_handle_write(struct dmz_target *dmz, struct dm_zone *zone,
        sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
        unsigned int nr_blocks = dmz_bio_blocks(bio);

        dmz_dev_debug(dmz->dev, "WRITE chunk %llu -> %s zone %u, block %llu, %u blocks",
                      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
                      (dmz_is_rnd(zone) ? "RND" : "SEQ"),
                      dmz_id(dmz->metadata, zone),
                      (unsigned long long)chunk_block, nr_blocks);

        if (dmz_is_rnd(zone) || chunk_block == zone->wp_block) {
                 * zone is a random zone or it is a sequential zone
                 * and the BIO is aligned to the zone write pointer:
                 * direct write the zone.
                return dmz_handle_direct_write(dmz, zone, bio, chunk_block, nr_blocks);

         * This is an unaligned write in a sequential zone:
         * use buffered write.
        return dmz_handle_buffered_write(dmz, zone, bio, chunk_block, nr_blocks);

 * Process a discard BIO.
static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone,
        struct dmz_metadata *zmd = dmz->metadata;
        sector_t block = dmz_bio_block(bio);
        unsigned int nr_blocks = dmz_bio_blocks(bio);
        sector_t chunk_block = dmz_chunk_block(dmz->dev, block);

        /* For unmapped chunks, there is nothing to do */

        if (dmz_is_readonly(zone))

        dmz_dev_debug(dmz->dev, "DISCARD chunk %llu -> zone %u, block %llu, %u blocks",
                      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
                      (unsigned long long)chunk_block, nr_blocks);

         * Invalidate blocks in the data zone and its
         * buffer zone if one is mapped.
        if (dmz_is_rnd(zone) || chunk_block < zone->wp_block)
                ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);
        if (ret == 0 && zone->bzone)
                ret = dmz_invalidate_blocks(zmd, zone->bzone,
                                            chunk_block, nr_blocks);
static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
        struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
        struct dmz_metadata *zmd = dmz->metadata;
        struct dm_zone *zone;

         * Write may trigger a zone allocation. So make sure the
         * allocation can succeed.
        if (bio_op(bio) == REQ_OP_WRITE)
                dmz_schedule_reclaim(dmz->reclaim);

        dmz_lock_metadata(zmd);

        if (dmz->dev->flags & DMZ_BDEV_DYING) {

         * Get the data zone mapping the chunk. There may be no
         * mapping for read and discard. If a mapping is obtained,
         * the zone returned will be set to active state.
        zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(dmz->dev, bio),

        /* Process the BIO */
        dmz_activate_zone(zone);

        switch (bio_op(bio)) {
                ret = dmz_handle_read(dmz, zone, bio);
                ret = dmz_handle_write(dmz, zone, bio);
        case REQ_OP_WRITE_ZEROES:
                ret = dmz_handle_discard(dmz, zone, bio);
                dmz_dev_err(dmz->dev, "Unsupported BIO operation 0x%x",

         * Release the chunk mapping. This will check that the mapping
         * is still valid, that is, that the zone used still has valid blocks.
        dmz_put_chunk_mapping(zmd, zone);

        dmz_bio_endio(bio, errno_to_blk_status(ret));

        dmz_unlock_metadata(zmd);
 * Increment a chunk reference counter.
static inline void dmz_get_chunk_work(struct dm_chunk_work *cw)
        atomic_inc(&cw->refcount);

 * Decrement a chunk work reference count and
 * free it if it becomes 0.
static void dmz_put_chunk_work(struct dm_chunk_work *cw)
        if (atomic_dec_and_test(&cw->refcount)) {
                WARN_ON(!bio_list_empty(&cw->bio_list));
                radix_tree_delete(&cw->target->chunk_rxtree, cw->chunk);
 * Chunk BIO work function.
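 *
 * All BIOs targeting the same chunk are funnelled through a single
 * dm_chunk_work and are therefore processed in submission order. chunk_lock
 * protects both the per-work BIO list and the chunk radix tree.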
static void dmz_chunk_work(struct work_struct *work)
        struct dm_chunk_work *cw = container_of(work, struct dm_chunk_work, work);
        struct dmz_target *dmz = cw->target;

        mutex_lock(&dmz->chunk_lock);

        /* Process the chunk BIOs */
        while ((bio = bio_list_pop(&cw->bio_list))) {
                mutex_unlock(&dmz->chunk_lock);
                dmz_handle_bio(dmz, cw, bio);
                mutex_lock(&dmz->chunk_lock);
                dmz_put_chunk_work(cw);

        /* Queueing the work incremented the work refcount */
        dmz_put_chunk_work(cw);

        mutex_unlock(&dmz->chunk_lock);
static void dmz_flush_work(struct work_struct *work)
        struct dmz_target *dmz = container_of(work, struct dmz_target, flush_work.work);

        /* Flush dirty metadata blocks */
        ret = dmz_flush_metadata(dmz->metadata);
                dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret);

        /* Process queued flush requests */
                spin_lock(&dmz->flush_lock);
                bio = bio_list_pop(&dmz->flush_list);
                spin_unlock(&dmz->flush_lock);

                dmz_bio_endio(bio, errno_to_blk_status(ret));

        queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
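        /*
         * Note that the work re-arms itself, so dirty metadata gets flushed
         * at least every DMZ_FLUSH_PERIOD even without explicit flush BIOs.
         */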
 * Get a chunk work and start it to process a new BIO.
 * If the BIO chunk has no work yet, create one.
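 *
 * The chunk work reference count covers both the queued BIOs and the
 * workqueue item itself: one reference is taken for every BIO added to the
 * work and one more when the work is successfully queued.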
static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
        unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
        struct dm_chunk_work *cw;

        mutex_lock(&dmz->chunk_lock);

        /* Get the BIO chunk work. If one is not active yet, create one */
        cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
                /* Create a new chunk work */
                cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);

                INIT_WORK(&cw->work, dmz_chunk_work);
                atomic_set(&cw->refcount, 0);
                bio_list_init(&cw->bio_list);

                ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);

        bio_list_add(&cw->bio_list, bio);
        dmz_get_chunk_work(cw);

        dmz_reclaim_bio_acc(dmz->reclaim);
        if (queue_work(dmz->chunk_wq, &cw->work))
                dmz_get_chunk_work(cw);

        mutex_unlock(&dmz->chunk_lock);
 * Check if the backing device is being removed. If it's on the way out,
 * start failing I/O. Reclaim and metadata components also call this
 * function to cleanly abort operation in the event of such failure.
bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
        if (dmz_dev->flags & DMZ_BDEV_DYING)

        if (dmz_dev->flags & DMZ_CHECK_BDEV)
                return !dmz_check_bdev(dmz_dev);

        if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
                dmz_dev_warn(dmz_dev, "Backing device queue dying");
                dmz_dev->flags |= DMZ_BDEV_DYING;

        return dmz_dev->flags & DMZ_BDEV_DYING;

 * Check the backing device availability. This detects such events as
 * backing device going offline due to errors, media removals, etc.
 * This check is less efficient than dmz_bdev_is_dying() and should
 * only be performed as a part of error handling.
bool dmz_check_bdev(struct dmz_dev *dmz_dev)
        struct gendisk *disk;

        dmz_dev->flags &= ~DMZ_CHECK_BDEV;

        if (dmz_bdev_is_dying(dmz_dev))

        disk = dmz_dev->bdev->bd_disk;
        if (disk->fops->check_events &&
            disk->fops->check_events(disk, 0) & DISK_EVENT_MEDIA_CHANGE) {
                dmz_dev_warn(dmz_dev, "Backing device offline");
                dmz_dev->flags |= DMZ_BDEV_DYING;

        return !(dmz_dev->flags & DMZ_BDEV_DYING);
static int dmz_map(struct dm_target *ti, struct bio *bio)
        struct dmz_target *dmz = ti->private;
        struct dmz_dev *dev = dmz->dev;
        struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
        sector_t sector = bio->bi_iter.bi_sector;
        unsigned int nr_sectors = bio_sectors(bio);
        sector_t chunk_sector;

        if (dmz_bdev_is_dying(dmz->dev))
                return DM_MAPIO_KILL;

        dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
                      bio_op(bio), (unsigned long long)sector, nr_sectors,
                      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
                      (unsigned long long)dmz_chunk_block(dmz->dev, dmz_bio_block(bio)),
                      (unsigned int)dmz_bio_blocks(bio));

        bio_set_dev(bio, dev->bdev);

        if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)
                return DM_MAPIO_REMAPPED;

        /* The BIO should be block aligned */
        if ((nr_sectors & DMZ_BLOCK_SECTORS_MASK) || (sector & DMZ_BLOCK_SECTORS_MASK))
                return DM_MAPIO_KILL;

        /* Initialize the BIO context */
        bioctx->target = dmz;
        atomic_set(&bioctx->ref, 1);

        /* Set the BIO pending in the flush list */
        if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
                spin_lock(&dmz->flush_lock);
                bio_list_add(&dmz->flush_list, bio);
                spin_unlock(&dmz->flush_lock);
                mod_delayed_work(dmz->flush_wq, &dmz->flush_work, 0);
                return DM_MAPIO_SUBMITTED;

        /* Split zone BIOs to fit entirely into a zone */
        chunk_sector = sector & (dev->zone_nr_sectors - 1);
        if (chunk_sector + nr_sectors > dev->zone_nr_sectors)
                dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);

        /* Now ready to handle this BIO */
        ret = dmz_queue_chunk_work(dmz, bio);
                dmz_dev_debug(dmz->dev,
                              "BIO op %d, can't process chunk %llu, err %i\n",
                              bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
                return DM_MAPIO_REQUEUE;

        return DM_MAPIO_SUBMITTED;
 * Get zoned device information.
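 *
 * The backing device must be a zoned block device, and the target table must
 * map it entirely (either the full capacity or the zone-aligned capacity).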
static int dmz_get_zoned_device(struct dm_target *ti, char *path)
        struct dmz_target *dmz = ti->private;
        struct request_queue *q;
        sector_t aligned_capacity;

        /* Get the target device */
        ret = dm_get_device(ti, path, dm_table_get_mode(ti->table), &dmz->ddev);
                ti->error = "Get target device failed";

        dev = kzalloc(sizeof(struct dmz_dev), GFP_KERNEL);

        dev->bdev = dmz->ddev->bdev;
        (void)bdevname(dev->bdev, dev->name);

        if (bdev_zoned_model(dev->bdev) == BLK_ZONED_NONE) {
                ti->error = "Not a zoned block device";

        q = bdev_get_queue(dev->bdev);
        dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
        aligned_capacity = dev->capacity & ~(blk_queue_zone_sectors(q) - 1);
            ((ti->len != dev->capacity) && (ti->len != aligned_capacity))) {
                ti->error = "Partial mapping not supported";

        dev->zone_nr_sectors = blk_queue_zone_sectors(q);
        dev->zone_nr_sectors_shift = ilog2(dev->zone_nr_sectors);

        dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors);
        dev->zone_nr_blocks_shift = ilog2(dev->zone_nr_blocks);

        dev->nr_zones = (dev->capacity + dev->zone_nr_sectors - 1)
                >> dev->zone_nr_sectors_shift;

        dm_put_device(ti, dmz->ddev);
 * Cleanup zoned device information.
static void dmz_put_zoned_device(struct dm_target *ti)
        struct dmz_target *dmz = ti->private;

        dm_put_device(ti, dmz->ddev);
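/*
 * Setup target. The single constructor argument is the path to the zoned
 * block device. A minimal usage sketch (device name is illustrative and the
 * device is assumed to have been formatted with dmzadm beforehand):
 *
 *   echo "0 $(blockdev --getsz /dev/sdX) zoned /dev/sdX" | \
 *           dmsetup create dmz-sdX
 */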
static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        struct dmz_target *dmz;

        /* Check arguments */
                ti->error = "Invalid argument count";

        /* Allocate and initialize the target descriptor */
        dmz = kzalloc(sizeof(struct dmz_target), GFP_KERNEL);
                ti->error = "Unable to allocate the zoned target descriptor";

        /* Get the target zoned block device */
        ret = dmz_get_zoned_device(ti, argv[0]);

        /* Initialize metadata */
        ret = dmz_ctr_metadata(dev, &dmz->metadata);
                ti->error = "Metadata initialization failed";

        /* Set target (no write same support) */
        ti->max_io_len = dev->zone_nr_sectors;
        ti->num_flush_bios = 1;
        ti->num_discard_bios = 1;
        ti->num_write_zeroes_bios = 1;
        ti->per_io_data_size = sizeof(struct dmz_bioctx);
        ti->flush_supported = true;
        ti->discards_supported = true;
        ti->split_discard_bios = true;

        /* The exposed capacity is the number of chunks that can be mapped */
        ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) << dev->zone_nr_sectors_shift;

        ret = bioset_init(&dmz->bio_set, DMZ_MIN_BIOS, 0, 0);
                ti->error = "Create BIO set failed";

        mutex_init(&dmz->chunk_lock);
        INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
        dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
        if (!dmz->chunk_wq) {
                ti->error = "Create chunk workqueue failed";

        spin_lock_init(&dmz->flush_lock);
        bio_list_init(&dmz->flush_list);
        INIT_DELAYED_WORK(&dmz->flush_work, dmz_flush_work);
        dmz->flush_wq = alloc_ordered_workqueue("dmz_fwq_%s", WQ_MEM_RECLAIM,
        if (!dmz->flush_wq) {
                ti->error = "Create flush workqueue failed";
        mod_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);

        /* Initialize reclaim */
        ret = dmz_ctr_reclaim(dev, dmz->metadata, &dmz->reclaim);
                ti->error = "Zone reclaim initialization failed";

        dmz_dev_info(dev, "Target device: %llu 512-byte logical sectors (%llu blocks)",
                     (unsigned long long)ti->len,
                     (unsigned long long)dmz_sect2blk(ti->len));

        destroy_workqueue(dmz->flush_wq);
        destroy_workqueue(dmz->chunk_wq);
        mutex_destroy(&dmz->chunk_lock);
        bioset_exit(&dmz->bio_set);
        dmz_dtr_metadata(dmz->metadata);
        dmz_put_zoned_device(ti);
static void dmz_dtr(struct dm_target *ti)
        struct dmz_target *dmz = ti->private;

        flush_workqueue(dmz->chunk_wq);
        destroy_workqueue(dmz->chunk_wq);

        dmz_dtr_reclaim(dmz->reclaim);

        cancel_delayed_work_sync(&dmz->flush_work);
        destroy_workqueue(dmz->flush_wq);

        (void) dmz_flush_metadata(dmz->metadata);

        dmz_dtr_metadata(dmz->metadata);

        bioset_exit(&dmz->bio_set);

        dmz_put_zoned_device(ti);

        mutex_destroy(&dmz->chunk_lock);
 * Setup target request queue limits.
static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
        struct dmz_target *dmz = ti->private;
        unsigned int chunk_sectors = dmz->dev->zone_nr_sectors;

        limits->logical_block_size = DMZ_BLOCK_SIZE;
        limits->physical_block_size = DMZ_BLOCK_SIZE;

        blk_limits_io_min(limits, DMZ_BLOCK_SIZE);
        blk_limits_io_opt(limits, DMZ_BLOCK_SIZE);

        limits->discard_alignment = DMZ_BLOCK_SIZE;
        limits->discard_granularity = DMZ_BLOCK_SIZE;
        limits->max_discard_sectors = chunk_sectors;
        limits->max_hw_discard_sectors = chunk_sectors;
        limits->max_write_zeroes_sectors = chunk_sectors;

        /* FS hint to try to align to the device zone size */
        limits->chunk_sectors = chunk_sectors;
        limits->max_sectors = chunk_sectors;

        /* We are exposing a drive-managed zoned block device */
        limits->zoned = BLK_ZONED_NONE;
 * Pass on ioctl to the backend device.
static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
        struct dmz_target *dmz = ti->private;

        if (!dmz_check_bdev(dmz->dev))

        *bdev = dmz->dev->bdev;

 * Stop works on suspend.
static void dmz_suspend(struct dm_target *ti)
        struct dmz_target *dmz = ti->private;

        flush_workqueue(dmz->chunk_wq);
        dmz_suspend_reclaim(dmz->reclaim);
        cancel_delayed_work_sync(&dmz->flush_work);

 * Restart works on resume or if suspend failed.
static void dmz_resume(struct dm_target *ti)
        struct dmz_target *dmz = ti->private;

        queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
        dmz_resume_reclaim(dmz->reclaim);
static int dmz_iterate_devices(struct dm_target *ti,
                               iterate_devices_callout_fn fn, void *data)
        struct dmz_target *dmz = ti->private;
        struct dmz_dev *dev = dmz->dev;
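        /* Only report the zone-aligned part of the device capacity. */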
        sector_t capacity = dev->capacity & ~(dev->zone_nr_sectors - 1);

        return fn(ti, dmz->ddev, 0, capacity, data);

static struct target_type dmz_type = {
        .version = {1, 0, 0},
        .features = DM_TARGET_SINGLETON | DM_TARGET_ZONED_HM,
        .module = THIS_MODULE,
        .io_hints = dmz_io_hints,
        .prepare_ioctl = dmz_prepare_ioctl,
        .postsuspend = dmz_suspend,
        .resume = dmz_resume,
        .iterate_devices = dmz_iterate_devices,

static int __init dmz_init(void)
        return dm_register_target(&dmz_type);

static void __exit dmz_exit(void)
        dm_unregister_target(&dmz_type);

module_init(dmz_init);
module_exit(dmz_exit);

MODULE_DESCRIPTION(DM_NAME " target for zoned block devices");
MODULE_AUTHOR("Damien Le Moal <damien.lemoal@wdc.com>");
MODULE_LICENSE("GPL");