/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */
#include "dm-zoned.h"

#include <linux/module.h>

#define	DM_MSG_PREFIX		"zoned reclaim"
struct dmz_reclaim {
	struct dmz_metadata	*metadata;
	struct dmz_dev		*dev;

	struct delayed_work	work;
	struct workqueue_struct *wq;

	struct dm_kcopyd_client	*kc;
	struct dm_kcopyd_throttle kc_throttle;
	int			kc_err;

	unsigned long		flags;

	/* Last target access time */
	unsigned long		atime;
};
/*
 * Reclaim state flags.
 */
enum {
	DMZ_RECLAIM_KCOPY,
};
/*
 * Number of seconds of target BIO inactivity to consider the target idle.
 */
#define DMZ_IDLE_PERIOD		(10UL * HZ)
/*
 * Percentage of unmapped (free) random zones below which reclaim starts
 * even if the target is busy.
 */
#define DMZ_RECLAIM_LOW_UNMAP_RND	30

/*
 * Percentage of unmapped (free) random zones above which reclaim will
 * stop if the target is busy.
 */
#define DMZ_RECLAIM_HIGH_UNMAP_RND	50
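/*
 * For example, with 100 random zones, a busy target is reclaimed only
 * while 30 or fewer of them are unmapped, while an idle target is
 * reclaimed whenever at least one random zone is still mapped (see
 * dmz_should_reclaim()).
 */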
/*
 * Align a sequential zone write pointer to chunk_block.
 */
static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
				sector_t block)
{
	struct dmz_metadata *zmd = zrc->metadata;
	sector_t wp_block = zone->wp_block;
	unsigned int nr_blocks;
	int ret;

	if (wp_block == block)
		return 0;

	if (wp_block > block)
		return -EIO;

	/*
	 * Zeroout the space between the write pointer
	 * and the requested position.
	 */
	nr_blocks = block - wp_block;
	ret = blkdev_issue_zeroout(zrc->dev->bdev,
				   dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
				   dmz_blk2sect(nr_blocks), GFP_NOIO, 0);
	if (ret) {
		dmz_dev_err(zrc->dev,
			    "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
			    dmz_id(zmd, zone), (unsigned long long)wp_block,
			    (unsigned long long)block, nr_blocks, ret);
		dmz_check_bdev(zrc->dev);
		return ret;
	}

	zone->wp_block = block;

	return 0;
}
/*
 * dm_kcopyd_copy end notification.
 */
static void dmz_reclaim_kcopy_end(int read_err, unsigned long write_err,
				  void *context)
{
	struct dmz_reclaim *zrc = context;

	if (read_err || write_err)
		zrc->kc_err = -EIO;
	else
		zrc->kc_err = 0;

	clear_bit_unlock(DMZ_RECLAIM_KCOPY, &zrc->flags);
	smp_mb__after_atomic();
	wake_up_bit(&zrc->flags, DMZ_RECLAIM_KCOPY);
}
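/*
 * dmz_reclaim_copy() waits on the DMZ_RECLAIM_KCOPY bit for each copied
 * region: clear_bit_unlock() plus the barrier above ensure that the
 * waiter woken by wake_up_bit() observes the kc_err value set by this
 * completion.
 */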
/*
 * Copy valid blocks of src_zone into dst_zone.
 */
static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
			    struct dm_zone *src_zone, struct dm_zone *dst_zone)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dmz_dev *dev = zrc->dev;
	struct dm_io_region src, dst;
	sector_t block = 0, end_block;
	sector_t nr_blocks;
	sector_t src_zone_block;
	sector_t dst_zone_block;
	unsigned long flags = 0;
	int ret;

	/* Get the zones copy block range */
	if (dmz_is_seq(src_zone))
		end_block = src_zone->wp_block;
	else
		end_block = dev->zone_nr_blocks;
	src_zone_block = dmz_start_block(zmd, src_zone);
	dst_zone_block = dmz_start_block(zmd, dst_zone);

	if (dmz_is_seq(dst_zone))
		set_bit(DM_KCOPYD_WRITE_SEQ, &flags);

	while (block < end_block) {
		if (dev->flags & DMZ_BDEV_DYING)
			return -EIO;

		/* Get a valid region from the source zone */
		ret = dmz_first_valid_block(zmd, src_zone, &block);
		if (ret <= 0)
			return ret;
		nr_blocks = ret;

		/*
		 * If we are writing to a sequential zone, writes must be
		 * issued in order, so zero out any hole between the zone
		 * write pointer and the next valid block.
		 */
		if (dmz_is_seq(dst_zone)) {
			ret = dmz_reclaim_align_wp(zrc, dst_zone, block);
			if (ret)
				return ret;
		}

		src.bdev = dev->bdev;
		src.sector = dmz_blk2sect(src_zone_block + block);
		src.count = dmz_blk2sect(nr_blocks);

		dst.bdev = dev->bdev;
		dst.sector = dmz_blk2sect(dst_zone_block + block);
		dst.count = src.count;

		/* Copy the valid region */
		set_bit(DMZ_RECLAIM_KCOPY, &zrc->flags);
		ret = dm_kcopyd_copy(zrc->kc, &src, 1, &dst, flags,
				     dmz_reclaim_kcopy_end, zrc);
		if (ret)
			return ret;

		/* Wait for copy to complete */
		wait_on_bit_io(&zrc->flags, DMZ_RECLAIM_KCOPY,
			       TASK_UNINTERRUPTIBLE);
		if (zrc->kc_err)
			return zrc->kc_err;

		block += nr_blocks;
		if (dmz_is_seq(dst_zone))
			dst_zone->wp_block = block;
	}

	return 0;
}
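/*
 * Note that DM_KCOPYD_WRITE_SEQ above makes kcopyd issue the destination
 * writes in order, which a sequential zone requires; together with
 * dmz_reclaim_align_wp(), this keeps each copied region starting exactly
 * at the destination zone write pointer.
 */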
/*
 * Move valid blocks of dzone buffer zone into dzone (after its write pointer)
 * and free the buffer zone.
 */
static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	struct dm_zone *bzone = dzone->bzone;
	sector_t chunk_block = dzone->wp_block;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret;

	dmz_dev_debug(zrc->dev,
		      "Chunk %u, move buf zone %u (weight %u) to data zone %u (weight %u)",
		      dzone->chunk, dmz_id(zmd, bzone), dmz_weight(bzone),
		      dmz_id(zmd, dzone), dmz_weight(dzone));

	/* Flush data zone into the buffer zone */
	ret = dmz_reclaim_copy(zrc, bzone, dzone);
	if (ret < 0)
		return ret;

	dmz_lock_flush(zmd);

	/* Validate copied blocks */
	ret = dmz_merge_valid_blocks(zmd, bzone, dzone, chunk_block);
	if (ret == 0) {
		/* Free the buffer zone */
		dmz_invalidate_blocks(zmd, bzone, 0, zrc->dev->zone_nr_blocks);
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, bzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, bzone);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}
/*
 * Merge valid blocks of dzone into its buffer zone and free dzone.
 */
static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	unsigned int chunk = dzone->chunk;
	struct dm_zone *bzone = dzone->bzone;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret = 0;

	dmz_dev_debug(zrc->dev,
		      "Chunk %u, move data zone %u (weight %u) to buf zone %u (weight %u)",
		      chunk, dmz_id(zmd, dzone), dmz_weight(dzone),
		      dmz_id(zmd, bzone), dmz_weight(bzone));

	/* Flush data zone into the buffer zone */
	ret = dmz_reclaim_copy(zrc, dzone, bzone);
	if (ret < 0)
		return ret;

	dmz_lock_flush(zmd);

	/* Validate copied blocks */
	ret = dmz_merge_valid_blocks(zmd, dzone, bzone, 0);
	if (ret == 0) {
		/*
		 * Free the data zone and remap the chunk to
		 * the buffer zone.
		 */
		dmz_invalidate_blocks(zmd, dzone, 0, zrc->dev->zone_nr_blocks);
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, bzone);
		dmz_unmap_zone(zmd, dzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, dzone);
		dmz_map_zone(zmd, bzone, chunk);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}
/*
 * Move valid blocks of the random data zone dzone into a free sequential zone.
 * Once blocks are moved, remap the chunk to the sequential zone.
 */
static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	unsigned int chunk = dzone->chunk;
	struct dm_zone *szone = NULL;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret;

	/* Get a free sequential zone */
	dmz_lock_map(zmd);
	szone = dmz_alloc_zone(zmd, DMZ_ALLOC_RECLAIM);
	dmz_unlock_map(zmd);
	if (!szone)
		return -ENOSPC;

	dmz_dev_debug(zrc->dev,
		      "Chunk %u, move rnd zone %u (weight %u) to seq zone %u",
		      chunk, dmz_id(zmd, dzone), dmz_weight(dzone),
		      dmz_id(zmd, szone));

	/* Flush the random data zone into the sequential zone */
	ret = dmz_reclaim_copy(zrc, dzone, szone);

	dmz_lock_flush(zmd);

	if (ret == 0) {
		/* Validate copied blocks */
		ret = dmz_copy_valid_blocks(zmd, dzone, szone);
	}
	if (ret) {
		/* Free the sequential zone */
		dmz_lock_map(zmd);
		dmz_free_zone(zmd, szone);
		dmz_unlock_map(zmd);
	} else {
		/* Free the data zone and remap the chunk */
		dmz_invalidate_blocks(zmd, dzone, 0, zrc->dev->zone_nr_blocks);
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, dzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, dzone);
		dmz_map_zone(zmd, szone, chunk);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}
/*
 * Reclaim an empty zone.
 */
static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	struct dmz_metadata *zmd = zrc->metadata;

	dmz_lock_flush(zmd);
	dmz_lock_map(zmd);
	dmz_unmap_zone(zmd, dzone);
	dmz_unlock_zone_reclaim(dzone);
	dmz_free_zone(zmd, dzone);
	dmz_unlock_map(zmd);
	dmz_unlock_flush(zmd);
}
/*
 * Find a candidate zone for reclaim and process it.
 */
static int dmz_do_reclaim(struct dmz_reclaim *zrc)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dm_zone *dzone;
	struct dm_zone *rzone;
	unsigned long start;
	int ret;

	/* Get a data zone */
	dzone = dmz_get_zone_for_reclaim(zmd);
	if (IS_ERR(dzone))
		return PTR_ERR(dzone);

	start = jiffies;

	if (dmz_is_rnd(dzone)) {
		if (!dmz_weight(dzone)) {
			/* Empty zone */
			dmz_reclaim_empty(zrc, dzone);
			ret = 0;
		} else {
			/*
			 * Reclaim the random data zone by moving its
			 * valid data blocks to a free sequential zone.
			 */
			ret = dmz_reclaim_rnd_data(zrc, dzone);
		}
		rzone = dzone;
	} else {
		struct dm_zone *bzone = dzone->bzone;
		sector_t chunk_block = 0;

		ret = dmz_first_valid_block(zmd, bzone, &chunk_block);
		if (ret < 0)
			goto out;

		if (ret == 0 || chunk_block >= dzone->wp_block) {
			/*
			 * The buffer zone is empty or its valid blocks are
			 * after the data zone write pointer.
			 */
			ret = dmz_reclaim_buf(zrc, dzone);
			rzone = bzone;
		} else {
			/*
			 * Reclaim the data zone by merging it into the
			 * buffer zone so that the buffer zone itself can
			 * be later reclaimed.
			 */
			ret = dmz_reclaim_seq_data(zrc, dzone);
			rzone = dzone;
		}
	}
out:
	if (ret) {
		dmz_unlock_zone_reclaim(dzone);
		return ret;
	}

	ret = dmz_flush_metadata(zrc->metadata);
	if (ret) {
		dmz_dev_debug(zrc->dev,
			      "Metadata flush for zone %u failed, err %d\n",
			      dmz_id(zmd, rzone), ret);
		return ret;
	}

	dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms",
		      dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start));
	return 0;
}
/*
 * Test if the target device is idle, that is, if it completed no BIO
 * for at least DMZ_IDLE_PERIOD.
 */
static inline int dmz_target_idle(struct dmz_reclaim *zrc)
{
	return time_is_before_jiffies(zrc->atime + DMZ_IDLE_PERIOD);
}
/*
 * Test if reclaim is necessary.
 */
static bool dmz_should_reclaim(struct dmz_reclaim *zrc)
{
	struct dmz_metadata *zmd = zrc->metadata;
	unsigned int nr_rnd = dmz_nr_rnd_zones(zmd);
	unsigned int nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd);
	unsigned int p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;

	/* Reclaim when idle */
	if (dmz_target_idle(zrc) && nr_unmap_rnd < nr_rnd)
		return true;

	/* If there are still plenty of random zones, do not reclaim */
	if (p_unmap_rnd >= DMZ_RECLAIM_HIGH_UNMAP_RND)
		return false;

	/*
	 * If the percentage of unmapped random zones is low,
	 * reclaim even if the target is busy.
	 */
	return p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND;
}
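/*
 * For example, with 8 of 40 random zones unmapped (20%), a busy target
 * is reclaimed; with 25 of 40 unmapped (62%), it is not.
 */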
/*
 * Reclaim work function.
 */
static void dmz_reclaim_work(struct work_struct *work)
{
	struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work);
	struct dmz_metadata *zmd = zrc->metadata;
	unsigned int nr_rnd, nr_unmap_rnd;
	unsigned int p_unmap_rnd;
	int ret;

	if (dmz_bdev_is_dying(zrc->dev))
		return;

	if (!dmz_should_reclaim(zrc)) {
		mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
		return;
	}

	/*
	 * We need to start reclaiming random zones: set up zone copy
	 * throttling to go fast if we are very low on random zones and
	 * slower if some free random zones remain, to minimize the
	 * impact on the user workload.
	 */
	nr_rnd = dmz_nr_rnd_zones(zmd);
	nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd);
	p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;
	if (dmz_target_idle(zrc) || p_unmap_rnd < DMZ_RECLAIM_LOW_UNMAP_RND / 2) {
		/* Idle or very low percentage: go fast */
		zrc->kc_throttle.throttle = 100;
	} else {
		/* Busy but we still have some random zones: throttle */
		zrc->kc_throttle.throttle = min(75U, 100U - p_unmap_rnd / 2);
	}
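	/*
	 * With the current thresholds, the busy path above runs only when
	 * p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND, so 100 - p_unmap_rnd / 2
	 * is at least 85 and min() caps the busy throttle at 75%.
	 */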

	dmz_dev_debug(zrc->dev,
		      "Reclaim (%u): %s, %u%% free rnd zones (%u/%u)",
		      zrc->kc_throttle.throttle,
		      (dmz_target_idle(zrc) ? "Idle" : "Busy"),
		      p_unmap_rnd, nr_unmap_rnd, nr_rnd);

	ret = dmz_do_reclaim(zrc);
	if (ret) {
		dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret);
		if (!dmz_check_bdev(zrc->dev))
			return;
	}

	dmz_schedule_reclaim(zrc);
}
/*
 * Initialize reclaim.
 */
int dmz_ctr_reclaim(struct dmz_dev *dev, struct dmz_metadata *zmd,
		    struct dmz_reclaim **reclaim)
{
	struct dmz_reclaim *zrc;
	int ret;

	zrc = kzalloc(sizeof(struct dmz_reclaim), GFP_KERNEL);
	if (!zrc)
		return -ENOMEM;

	zrc->dev = dev;
	zrc->metadata = zmd;
	zrc->atime = jiffies;

	/* Reclaim kcopyd client */
	zrc->kc = dm_kcopyd_client_create(&zrc->kc_throttle);
	if (IS_ERR(zrc->kc)) {
		ret = PTR_ERR(zrc->kc);
		zrc->kc = NULL;
		goto err;
	}

	/* Reclaim work */
	INIT_DELAYED_WORK(&zrc->work, dmz_reclaim_work);
	zrc->wq = alloc_ordered_workqueue("dmz_rwq_%s", WQ_MEM_RECLAIM,
					  dev->name);
	if (!zrc->wq) {
		ret = -ENOMEM;
		goto err;
	}

	*reclaim = zrc;
	queue_delayed_work(zrc->wq, &zrc->work, 0);

	return 0;
err:
	if (zrc->kc)
		dm_kcopyd_client_destroy(zrc->kc);
	kfree(zrc);

	return ret;
}
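/*
 * After dmz_ctr_reclaim() returns, the work item rearms itself: with a
 * DMZ_IDLE_PERIOD delay when there is nothing to reclaim, or immediately
 * via dmz_schedule_reclaim() when more reclaim is needed.
 */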
/*
 * Terminate reclaim.
 */
void dmz_dtr_reclaim(struct dmz_reclaim *zrc)
{
	cancel_delayed_work_sync(&zrc->work);
	destroy_workqueue(zrc->wq);
	dm_kcopyd_client_destroy(zrc->kc);
	kfree(zrc);
}
/*
 * Suspend reclaim.
 */
void dmz_suspend_reclaim(struct dmz_reclaim *zrc)
{
	cancel_delayed_work_sync(&zrc->work);
}
/*
 * Resume reclaim.
 */
void dmz_resume_reclaim(struct dmz_reclaim *zrc)
{
	queue_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
}
/*
 * BIO accounting: note the time of the last target access.
 */
void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc)
{
	zrc->atime = jiffies;
}
/*
 * Start reclaim if necessary.
 */
void dmz_schedule_reclaim(struct dmz_reclaim *zrc)
{
	if (dmz_should_reclaim(zrc))
		mod_delayed_work(zrc->wq, &zrc->work, 0);
}