/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

#define CUTOFF_WRITEBACK	40
#define CUTOFF_WRITEBACK_SYNC	70
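
/*
 * Both cutoffs are percentages of cache in use (c->gc_stats.in_use),
 * checked in should_writeback() below: past CUTOFF_WRITEBACK only
 * sync/metadata (and partial-stripe) writes are still cached in
 * writeback mode; past CUTOFF_WRITEBACK_SYNC none are.
 */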

#define MAX_WRITEBACKS_IN_PASS	5
#define MAX_WRITESIZE_IN_PASS	5000	/* *512b */
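
/*
 * Presumably bounds on how much dirty data one pass of the writeback
 * thread gathers before re-checking the rate: at most
 * MAX_WRITEBACKS_IN_PASS contiguous extents or MAX_WRITESIZE_IN_PASS
 * sectors. (An assumption: the consumers live in writeback.c, not in
 * this header.)
 */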

#define WRITEBACK_RATE_UPDATE_SECS_MAX		60
#define WRITEBACK_RATE_UPDATE_SECS_DEFAULT	5
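
/*
 * Default and maximum interval, in seconds, at which the writeback
 * rate is recomputed (presumably the writeback_rate_update_seconds
 * tunable; its user is outside this header).
 */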

/*
 * 14 (16384ths) is chosen here so that each backing device gets a
 * reasonable fraction of the share, and the math does not blow up
 * until individual backing devices reach a petabyte.
 */
#define WRITEBACK_SHARE_SHIFT	14
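
/*
 * Total dirty sectors on a backing device: the sum of its per-stripe
 * dirty-sector counters.
 */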
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}
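
/*
 * Map a device offset (in sectors) to the index of the stripe that
 * contains it; returns -EINVAL for an out-of-range offset.
 */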
static inline int offset_to_stripe(struct bcache_device *d,
				   uint64_t offset)
{
	do_div(offset, d->stripe_size);

	/* d->nr_stripes is in range [1, INT_MAX] */
	if (unlikely(offset >= d->nr_stripes)) {
		pr_err("Invalid stripe %llu (>= nr_stripes %d).\n",
		       offset, d->nr_stripes);
		return -EINVAL;
	}

	/*
	 * Here offset is definitely smaller than INT_MAX,
	 * so returning it as an int will never overflow.
	 */
	return offset;
}
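
/*
 * True if any stripe overlapped by [offset, offset + nr_sectors) holds
 * dirty sectors.
 */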
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
					   uint64_t offset,
					   unsigned int nr_sectors)
{
	int stripe = offset_to_stripe(&dc->disk, offset);

	if (stripe < 0)
		return false;

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}
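
/*
 * Decide whether a write bio should go through the writeback cache:
 * never when not in writeback mode, while detaching, past
 * CUTOFF_WRITEBACK_SYNC, or for discards; always when completing a
 * dirty partial stripe would otherwise be expensive; otherwise only
 * for sync/metadata writes or while cache use is at or below
 * CUTOFF_WRITEBACK.
 */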
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned int cache_mode, bool would_skip)
{
	unsigned int in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    in_use > CUTOFF_WRITEBACK_SYNC)
		return false;

	if (bio_op(bio) == REQ_OP_DISCARD)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return (op_is_sync(bio->bi_opf) ||
		bio->bi_opf & (REQ_META|REQ_PRIO) ||
		in_use <= CUTOFF_WRITEBACK);
}
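
/* Wake the cached device's writeback thread, if one is running. */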
static inline void bch_writeback_queue(struct cached_dev *dc)
{
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		wake_up_process(dc->writeback_thread);
}
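
/*
 * Note that the device now holds dirty data: on the 0 -> 1 transition
 * of has_dirty, mark the backing superblock dirty and kick the
 * writeback thread.
 */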
static inline void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);
	}
}
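
/*
 * A minimal sketch of how the write path might use the two helpers
 * above (an assumption for illustration; the real caller lives in
 * request.c and carries more state, with bypass/cache_mode supplied by
 * that caller):
 *
 *	if (should_writeback(dc, bio, cache_mode, bypass)) {
 *		bypass = false;
 *		bch_writeback_add(dc);
 *	}
 */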

void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
				  uint64_t offset, int nr_sectors);

void bch_sectors_dirty_init(struct bcache_device *d);
void bch_cached_dev_writeback_init(struct cached_dev *dc);
int bch_cached_dev_writeback_start(struct cached_dev *dc);

#endif /* _BCACHE_WRITEBACK_H */