// SPDX-License-Identifier: GPL-2.0
/*
 * Zoned block device handling
 *
 * Copyright (c) 2015, Hannes Reinecke
 * Copyright (c) 2015, SUSE Linux GmbH
 *
 * Copyright (c) 2016, Damien Le Moal
 * Copyright (c) 2016, Western Digital
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "blk.h"
static inline sector_t blk_zone_start(struct request_queue *q,
				      sector_t sector)
{
	sector_t zone_mask = blk_queue_zone_sectors(q) - 1;

	return sector & ~zone_mask;
}
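/*
 * Example (illustrative): zone sizes are powers of two, so the mask
 * arithmetic above rounds a sector down to the start of its zone. With
 * 256 MiB zones (524288 sectors), zone_mask is 0x7ffff and
 * blk_zone_start(q, 1000000) returns 1000000 & ~0x7ffff = 524288, the
 * first sector of the second zone.
 */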
/*
 * Return true if a request is a write request that needs zone write locking.
 */
bool blk_req_needs_zone_write_lock(struct request *rq)
{
	if (!rq->q->seq_zones_wlock)
		return false;

	if (blk_rq_is_passthrough(rq))
		return false;

	switch (req_op(rq)) {
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE_SAME:
	case REQ_OP_WRITE:
		return blk_rq_zone_is_seq(rq);
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock);
void __blk_req_zone_write_lock(struct request *rq)
{
	if (WARN_ON_ONCE(test_and_set_bit(blk_rq_zone_no(rq),
					  rq->q->seq_zones_wlock)))
		return;

	WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
	rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_lock);
void __blk_req_zone_write_unlock(struct request *rq)
{
	rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED;
	if (rq->q->seq_zones_wlock)
		WARN_ON_ONCE(!test_and_clear_bit(blk_rq_zone_no(rq),
						 rq->q->seq_zones_wlock));
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);
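/*
 * Example (illustrative sketch): how a dispatch path could pair the two
 * helpers above so that at most one write per sequential zone is in flight.
 * The foo_*() callbacks are hypothetical; real users also defer requests
 * whose target zone is already write locked.
 */
#if 0
static void foo_prepare_dispatch(struct request *rq)
{
	/* Mark the target zone busy before handing the write to the driver */
	if (blk_req_needs_zone_write_lock(rq))
		__blk_req_zone_write_lock(rq);
}

static void foo_finish_request(struct request *rq)
{
	/* Release the zone once the locked write has completed */
	if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
		__blk_req_zone_write_unlock(rq);
}
#endif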
static inline unsigned int __blkdev_nr_zones(struct request_queue *q,
					     sector_t nr_sectors)
{
	sector_t zone_sectors = blk_queue_zone_sectors(q);

	return (nr_sectors + zone_sectors - 1) >> ilog2(zone_sectors);
}
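/*
 * Example (illustrative): the expression above is a round-up division by
 * the power-of-two zone size. With 4096-sector zones, 10000 sectors give
 * (10000 + 4095) >> 12 = 3 zones: two full zones plus a smaller last zone.
 */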
/**
 * blkdev_nr_zones - Get number of zones
 * @bdev:	Target block device
 *
 * Description:
 *    Return the total number of zones of a zoned block device.
 *    For a regular block device, the number of zones is always 0.
 */
unsigned int blkdev_nr_zones(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!blk_queue_is_zoned(q))
		return 0;

	return __blkdev_nr_zones(q, bdev->bd_part->nr_sects);
}
EXPORT_SYMBOL_GPL(blkdev_nr_zones);
/*
 * Check that a zone report belongs to this partition, and if yes, fix its
 * start sector and write pointer and return true. Return false otherwise.
 */
static bool blkdev_report_zone(struct block_device *bdev, struct blk_zone *rep)
{
	sector_t offset = get_start_sect(bdev);

	if (rep->start < offset)
		return false;

	rep->start -= offset;
	if (rep->start + rep->len > bdev->bd_part->nr_sects)
		return false;

	if (rep->type == BLK_ZONE_TYPE_CONVENTIONAL)
		rep->wp = rep->start + rep->len;
	else
		rep->wp -= offset;
	return true;
}
static int blk_report_zones(struct gendisk *disk, sector_t sector,
			    struct blk_zone *zones, unsigned int *nr_zones)
{
	struct request_queue *q = disk->queue;
	unsigned int z = 0, n, nrz = *nr_zones;
	sector_t capacity = get_capacity(disk);
	int ret;

	while (z < nrz && sector < capacity) {
		n = nrz - z;
		ret = disk->fops->report_zones(disk, sector, &zones[z], &n);
		if (ret)
			return ret;
		if (!n)
			break;
		sector += blk_queue_zone_sectors(q) * n;
		z += n;
	}

	WARN_ON(z > *nr_zones);
	*nr_zones = z;

	return 0;
}
/**
 * blkdev_report_zones - Get zones information
 * @bdev:	Target block device
 * @sector:	Sector from which to report zones
 * @zones:	Array of zone structures where to return the zone information
 * @nr_zones:	Number of zone structures in the zone array
 *
 * Description:
 *    Get zone information starting from the zone containing @sector.
 *    The number of zones reported may be less than the number requested
 *    by @nr_zones. The number of zones actually reported is returned
 *    in @nr_zones.
 *    The caller must use memalloc_noXX_save/restore() calls to control
 *    memory allocations done within this function (zone array and command
 *    buffer allocation by the device driver).
 */
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			struct blk_zone *zones, unsigned int *nr_zones)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int i, nrz;
	int ret;

	if (!blk_queue_is_zoned(q))
		return -EOPNOTSUPP;

	/*
	 * A block device that advertised itself as zoned must have a
	 * report_zones method. If it does not have one defined, the device
	 * driver has a bug. So warn about that.
	 */
	if (WARN_ON_ONCE(!bdev->bd_disk->fops->report_zones))
		return -EOPNOTSUPP;

	if (!*nr_zones || sector >= bdev->bd_part->nr_sects) {
		*nr_zones = 0;
		return 0;
	}

	nrz = min(*nr_zones,
		  __blkdev_nr_zones(q, bdev->bd_part->nr_sects - sector));
	ret = blk_report_zones(bdev->bd_disk, get_start_sect(bdev) + sector,
			       zones, &nrz);
	if (ret)
		return ret;

	for (i = 0; i < nrz; i++) {
		if (!blkdev_report_zone(bdev, zones))
			break;
		zones++;
	}

	*nr_zones = i;

	return 0;
}
EXPORT_SYMBOL_GPL(blkdev_report_zones);
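/*
 * Example (illustrative sketch): an in-kernel caller reporting a few zones
 * while honoring the memalloc_noXX requirement documented above. The
 * function name is hypothetical.
 */
#if 0
static int example_print_zones(struct block_device *bdev, sector_t sector)
{
	struct blk_zone *zones;
	unsigned int i, nr_zones = 16;
	unsigned int noio_flag;
	int ret;

	zones = kvcalloc(nr_zones, sizeof(*zones), GFP_KERNEL);
	if (!zones)
		return -ENOMEM;

	/* Prevent recursion into the block layer during driver allocations */
	noio_flag = memalloc_noio_save();
	ret = blkdev_report_zones(bdev, sector, zones, &nr_zones);
	memalloc_noio_restore(noio_flag);

	for (i = 0; !ret && i < nr_zones; i++)
		pr_info("zone at %llu, len %llu, wp %llu\n",
			(unsigned long long)zones[i].start,
			(unsigned long long)zones[i].len,
			(unsigned long long)zones[i].wp);

	kvfree(zones);
	return ret;
}
#endif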
static inline bool blkdev_allow_reset_all_zones(struct block_device *bdev,
						sector_t sector,
						sector_t nr_sectors)
{
	if (!blk_queue_zone_resetall(bdev_get_queue(bdev)))
		return false;

	if (sector || nr_sectors != part_nr_sects_read(bdev->bd_part))
		return false;

	/*
	 * REQ_OP_ZONE_RESET_ALL can be executed only if the block device is
	 * the entire disk, that is, if the block device's start offset is 0
	 * and its capacity is the same as the entire disk's.
	 */
	return get_start_sect(bdev) == 0 &&
	       part_nr_sects_read(bdev->bd_part) == get_capacity(bdev->bd_disk);
}
/**
 * blkdev_reset_zones - Reset zones write pointer
 * @bdev:	Target block device
 * @sector:	Start sector of the first zone to reset
 * @nr_sectors:	Number of sectors, at least the length of one zone
 * @gfp_mask:	Memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Reset the write pointer of the zones contained in the range
 *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 *    is valid, but the specified range should not contain conventional zones.
 */
int blkdev_reset_zones(struct block_device *bdev,
		       sector_t sector, sector_t nr_sectors,
		       gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t zone_sectors;
	sector_t end_sector = sector + nr_sectors;
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	if (!blk_queue_is_zoned(q))
		return -EOPNOTSUPP;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (!nr_sectors || end_sector > bdev->bd_part->nr_sects)
		/* Out of range */
		return -EINVAL;

	/* Check alignment (handle eventual smaller last zone) */
	zone_sectors = blk_queue_zone_sectors(q);
	if (sector & (zone_sectors - 1))
		return -EINVAL;

	if ((nr_sectors & (zone_sectors - 1)) &&
	    end_sector != bdev->bd_part->nr_sects)
		return -EINVAL;

	blk_start_plug(&plug);
	while (sector < end_sector) {

		bio = blk_next_bio(bio, 0, gfp_mask);
		bio_set_dev(bio, bdev);

		/*
		 * Special case for the zone reset operation that resets all
		 * zones, this is useful for applications like mkfs.
		 */
		if (blkdev_allow_reset_all_zones(bdev, sector, nr_sectors)) {
			bio->bi_opf = REQ_OP_ZONE_RESET_ALL;
			break;
		}

		bio->bi_opf = REQ_OP_ZONE_RESET;
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	ret = submit_bio_wait(bio);
	bio_put(bio);

	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL_GPL(blkdev_reset_zones);
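/*
 * Example (illustrative sketch): resetting every zone of a zoned block
 * device, as a mkfs-like in-kernel user might do. The helper name is
 * hypothetical.
 */
#if 0
static int example_reset_whole_device(struct block_device *bdev)
{
	/* A full-device range allows the REQ_OP_ZONE_RESET_ALL fast path */
	return blkdev_reset_zones(bdev, 0, part_nr_sects_read(bdev->bd_part),
				  GFP_KERNEL);
}
#endif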
/*
 * BLKREPORTZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
			      unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct request_queue *q;
	struct blk_zone_report rep;
	struct blk_zone *zones;
	int ret;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -ENOTTY;

	if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
		return -EFAULT;

	if (!rep.nr_zones)
		return -EINVAL;

	rep.nr_zones = min(blkdev_nr_zones(bdev), rep.nr_zones);

	zones = kvmalloc_array(rep.nr_zones, sizeof(struct blk_zone),
			       GFP_KERNEL | __GFP_ZERO);
	if (!zones)
		return -ENOMEM;

	ret = blkdev_report_zones(bdev, rep.sector, zones, &rep.nr_zones);
	if (ret)
		goto out;

	if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report))) {
		ret = -EFAULT;
		goto out;
	}

	if (rep.nr_zones) {
		if (copy_to_user(argp + sizeof(struct blk_zone_report), zones,
				 sizeof(struct blk_zone) * rep.nr_zones))
			ret = -EFAULT;
	}

out:
	kvfree(zones);

	return ret;
}
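/*
 * Example (illustrative userspace sketch): a program can call the ioctl
 * above with a buffer large enough for the report header plus the requested
 * number of struct blk_zone entries. Error handling is minimal.
 *
 *	#include <linux/blkzoned.h>
 *	#include <sys/ioctl.h>
 *	#include <stdlib.h>
 *
 *	struct blk_zone_report *rep;
 *
 *	rep = calloc(1, sizeof(*rep) + 16 * sizeof(struct blk_zone));
 *	rep->sector = 0;
 *	rep->nr_zones = 16;
 *	if (!ioctl(fd, BLKREPORTZONE, rep)) {
 *		// rep->nr_zones now holds the number of zones reported and
 *		// the zone descriptors follow the header in rep->zones[].
 *	}
 *	free(rep);
 */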
/*
 * BLKRESETZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
			     unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct request_queue *q;
	struct blk_zone_range zrange;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -ENOTTY;

	if (!(mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
		return -EFAULT;

	return blkdev_reset_zones(bdev, zrange.sector, zrange.nr_sectors,
				  GFP_KERNEL);
}
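/*
 * Example (illustrative userspace sketch): resetting the first zone of a
 * device opened read-write, assuming a zone size of 524288 sectors
 * (256 MiB).
 *
 *	struct blk_zone_range range = { .sector = 0, .nr_sectors = 524288 };
 *
 *	ioctl(fd, BLKRESETZONE, &range);
 */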
static inline unsigned long *blk_alloc_zone_bitmap(int node,
						   unsigned int nr_zones)
{
	return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
			    GFP_NOIO, node);
}
/*
 * Allocate an array of struct blk_zone to get nr_zones zone information.
 * The allocated array may be smaller than nr_zones.
 */
static struct blk_zone *blk_alloc_zones(unsigned int *nr_zones)
{
	struct blk_zone *zones;
	size_t nrz = min(*nr_zones, BLK_ZONED_REPORT_MAX_ZONES);

	/*
	 * GFP_KERNEL here is meaningless as the caller task context has
	 * the PF_MEMALLOC_NOIO flag set in blk_revalidate_disk_zones()
	 * with memalloc_noio_save().
	 */
	zones = kvcalloc(nrz, sizeof(struct blk_zone), GFP_KERNEL);
	if (!zones) {
		*nr_zones = 0;
		return NULL;
	}

	*nr_zones = nrz;

	return zones;
}
void blk_queue_free_zone_bitmaps(struct request_queue *q)
{
	kfree(q->seq_zones_bitmap);
	q->seq_zones_bitmap = NULL;
	kfree(q->seq_zones_wlock);
	q->seq_zones_wlock = NULL;
}
/**
 * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps
 * @disk:	Target disk
 *
 * Helper function for low-level device drivers to (re)allocate and
 * initialize a disk request queue's zone bitmaps. This function should
 * normally be called within the disk ->revalidate method. For BIO based
 * queues, no zone bitmap is allocated.
 */
int blk_revalidate_disk_zones(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	unsigned int nr_zones = __blkdev_nr_zones(q, get_capacity(disk));
	unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
	unsigned int i, rep_nr_zones = 0, z = 0, nrz;
	struct blk_zone *zones = NULL;
	unsigned int noio_flag;
	sector_t sector = 0;
	int ret = 0;

	/*
	 * BIO based queues do not use a scheduler so only q->nr_zones
	 * needs to be updated so that the sysfs exposed value is correct.
	 */
	if (!queue_is_mq(q)) {
		q->nr_zones = nr_zones;
		return 0;
	}

	/*
	 * Ensure that all memory allocations in this context are done as
	 * if GFP_NOIO was specified.
	 */
	noio_flag = memalloc_noio_save();
	if (!blk_queue_is_zoned(q) || !nr_zones) {
		nr_zones = 0;
		goto update;
	}

	/* Allocate bitmaps */
	ret = -ENOMEM;
	seq_zones_wlock = blk_alloc_zone_bitmap(q->node, nr_zones);
	if (!seq_zones_wlock)
		goto out;
	seq_zones_bitmap = blk_alloc_zone_bitmap(q->node, nr_zones);
	if (!seq_zones_bitmap)
		goto out;

	/* Get zone information and initialize seq_zones_bitmap */
	rep_nr_zones = nr_zones;
	zones = blk_alloc_zones(&rep_nr_zones);
	if (!zones)
		goto out;

	while (z < nr_zones) {
		nrz = min(nr_zones - z, rep_nr_zones);
		ret = blk_report_zones(disk, sector, zones, &nrz);
		if (ret)
			goto out;
		if (!nrz)
			break;
		for (i = 0; i < nrz; i++) {
			if (zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL)
				set_bit(z, seq_zones_bitmap);
			z++;
		}
		sector += nrz * blk_queue_zone_sectors(q);
	}

	if (WARN_ON(z != nr_zones)) {
		ret = -EIO;
		goto out;
	}
update:
	/*
	 * Install the new bitmaps, making sure the queue is stopped and
	 * all I/Os are completed (i.e. a scheduler is not referencing the
	 * bitmaps).
	 */
	blk_mq_freeze_queue(q);
	q->nr_zones = nr_zones;
	swap(q->seq_zones_wlock, seq_zones_wlock);
	swap(q->seq_zones_bitmap, seq_zones_bitmap);
	blk_mq_unfreeze_queue(q);

out:
	memalloc_noio_restore(noio_flag);

	kvfree(zones);
	kfree(seq_zones_wlock);
	kfree(seq_zones_bitmap);

	if (ret) {
		pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
		blk_mq_freeze_queue(q);
		blk_queue_free_zone_bitmaps(q);
		blk_mq_unfreeze_queue(q);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
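/*
 * Example (illustrative sketch): a request-based driver for a zoned device
 * would typically call the helper above from its ->revalidate_disk method
 * once the zone size has been configured on the queue. The foo_*() names
 * and struct foo_device are hypothetical.
 */
#if 0
static int foo_revalidate_disk(struct gendisk *disk)
{
	struct foo_device *dev = disk->private_data;

	/* Refresh capacity and zone geometry from the hardware first */
	set_capacity(disk, dev->nr_sectors);
	blk_queue_chunk_sectors(disk->queue, dev->zone_sectors);

	/* Rebuild q->nr_zones and the zone bitmaps from a fresh zone report */
	return blk_revalidate_disk_zones(disk);
}
#endif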