// SPDX-License-Identifier: GPL-2.0
/*
 * random utility code, for bcache but in theory not specific to bcache
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/types.h>
#include <linux/sched/clock.h>

#include "util.h"
#define simple_strtoint(c, end, base)	simple_strtol(c, end, base)
#define simple_strtouint(c, end, base)	simple_strtoul(c, end, base)
#define STRTO_H(name, type)						\
int bch_ ## name ## _h(const char *cp, type *res)			\
{									\
	static const char units[] = "kmgtpezy";				\
	char *e;							\
	const char *m;							\
	int u = 0;							\
	type i = simple_ ## name(cp, &e, 10);				\
	/* accept one optional binary-unit suffix: k, m, g, ..., y */	\
	if (e != cp && *e && (m = strchr(units, tolower(*e)))) {	\
		u = m - units + 1;					\
		e++;							\
	}								\
	if (*e == '\n')							\
		e++;							\
	if (e == cp || *e)						\
		return -EINVAL;						\
	/* scale by 1024 per unit, rejecting values that overflow */	\
	while (u--) {							\
		if ((type) ~0 > 0 &&					\
		    (type) ~0 / 1024 <= i)				\
			return -EINVAL;					\
		if ((i > 0 && ANYSINT_MAX(type) / 1024 < i) ||		\
		    (i < 0 && -ANYSINT_MAX(type) / 1024 > i))		\
			return -EINVAL;					\
		i *= 1024;						\
	}								\
	*res = i;							\
	return 0;							\
}
STRTO_H(strtoint, int)
STRTO_H(strtouint, unsigned int)
STRTO_H(strtoll, long long)
STRTO_H(strtoull, unsigned long long)
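/*
 * Worked example (illustrative, not part of the original file): with the
 * suffix handling above, bch_strtouint_h("10k", &x) stores 10 * 1024 =
 * 10240 in x, and "2m" yields 2 * 1024 * 1024 = 2097152; a value that
 * would overflow the destination type makes the helper return -EINVAL.
 */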
/**
 * bch_hprint - formats @v to human readable string for sysfs.
 * @buf: the (at least 8 byte) buffer to format the result into.
 * @v: signed 64 bit integer
 *
 * Returns the number of bytes used by format.
 */
ssize_t bch_hprint(char *buf, int64_t v)
{
	static const char units[] = "?kMGTPEZY";
	int u = 0, t = 0;
	uint64_t q = v < 0 ? -v : v;

	/* For as long as the number is more than 3 digits, but at least
	 * once, shift right / divide by 1024.  Keep the remainder for
	 * a digit after the decimal point.
	 */
	do {
		u++;
		t = q & ~(~0U << 10);
		q >>= 10;
	} while (q >= 1000);

	/* '-', up to 3 digits, '.', 1 digit, 1 character, null:
	 * a buffer of 8 bytes is always enough.
	 */
	if (v < 0)
		return sprintf(buf, "-%llu.%i%c", q, t * 10 / 1024, units[u]);

	return sprintf(buf, "%llu.%i%c", q, t * 10 / 1024, units[u]);
}
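/*
 * Worked example (illustrative, not part of the original file): for
 * v = 3500000 the loop runs twice (q = 3417 then 3, t = 345 on the last
 * pass), so bch_hprint() emits "3.3M", i.e. roughly 3.3 * 1024 * 1024.
 */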
bool bch_is_zero(const char *p, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (p[i])
			return false;
	return true;
}
int bch_parse_uuid(const char *s, char *uuid)
{
	size_t i, j, x;

	memset(uuid, 0, 16);

	for (i = 0, j = 0;
	     i < strspn(s, "-0123456789:ABCDEFabcdef") && j < 32;
	     i++) {
		x = s[i] | 32;

		switch (x) {
		case '0'...'9':
			x -= '0';
			break;
		case 'a'...'f':
			x -= 'a' - 10;
			break;
		default:
			continue;
		}

		if (!(j & 1))
			x <<= 4;
		uuid[j++ >> 1] |= x;
	}
	return i;
}
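/*
 * Worked example (illustrative, not part of the original file): parsing
 * "00112233-4455-6677-8899-aabbccddeeff" fills uuid[] with the sixteen
 * bytes 0x00, 0x11, ..., 0xff and returns the number of characters
 * consumed (36 for a canonical UUID string).
 */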
void bch_time_stats_update(struct time_stats *stats, uint64_t start_time)
{
	uint64_t now, duration, last;

	spin_lock(&stats->lock);

	now = local_clock();
	duration = time_after64(now, start_time)
		? now - start_time : 0;
	last = time_after64(now, stats->last)
		? now - stats->last : 0;

	stats->max_duration = max(stats->max_duration, duration);

	if (stats->last) {
		ewma_add(stats->average_duration, duration, 8, 8);

		if (stats->average_frequency)
			ewma_add(stats->average_frequency, last, 8, 8);
		else
			stats->average_frequency = last << 8;
	} else {
		stats->average_duration = duration << 8;
	}

	stats->last = now ?: 1;

	spin_unlock(&stats->lock);
}
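/*
 * Note (an assumption based on the ewma_add() helper in util.h, not text
 * from the original file): the running averages are kept as fixed point
 * values shifted left by 8 bits and each sample is folded in with a
 * weight of 1/8, roughly avg = (7 * avg + (sample << 8)) / 8; that is why
 * the first sample is seeded with "<< 8" above.
 */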
/**
 * bch_next_delay() - update ratelimiting statistics and calculate next delay
 * @d: the struct bch_ratelimit to update
 * @done: the amount of work done, in arbitrary units
 *
 * Increment @d by the amount of work done, and return how long to delay in
 * jiffies until the next time to do some work.
 */
uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
{
	uint64_t now = local_clock();

	d->next += div_u64(done * NSEC_PER_SEC, atomic_long_read(&d->rate));

	/* Bound the time.  Don't let us fall further than 2 seconds behind
	 * (this prevents unnecessary backlog that would make it impossible
	 * to catch up).  If we're ahead of the desired writeback rate,
	 * don't let us sleep more than 2.5 seconds (so we can notice/respond
	 * if the control system tells us to speed up!).
	 */
	if (time_before64(now + NSEC_PER_SEC * 5LLU / 2LLU, d->next))
		d->next = now + NSEC_PER_SEC * 5LLU / 2LLU;

	if (time_after64(now - NSEC_PER_SEC * 2, d->next))
		d->next = now - NSEC_PER_SEC * 2;

	return time_after64(d->next, now)
		? div_u64(d->next - now, NSEC_PER_SEC / HZ)
		: 0;
}
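/*
 * Worked example (illustrative, with an assumed HZ of 250; HZ is config
 * dependent): with d->rate = 1000 units/s and done = 100, d->next
 * advances by 100 * NSEC_PER_SEC / 1000 = 100 ms.  If d->next then sits
 * 50 ms in the future, the return value is div_u64(50000000,
 * 1000000000 / 250) = 12 jiffies of delay.
 */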
/*
 * Generally it isn't good to access .bi_io_vec and .bi_vcnt directly,
 * the preferred way is bio_add_page, but in this case, bch_bio_map()
 * supposes that the bvec table is empty, so it is safe to access
 * .bi_vcnt & .bi_io_vec in this way even after multipage bvec is
 * supported.
 */
void bch_bio_map(struct bio *bio, void *base)
{
	size_t size = bio->bi_iter.bi_size;
	struct bio_vec *bv = bio->bi_io_vec;

	BUG_ON(!bio->bi_iter.bi_size);
	BUG_ON(bio->bi_vcnt);

	bv->bv_offset = base ? offset_in_page(base) : 0;
	goto start;

	for (; size; bio->bi_vcnt++, bv++) {
		bv->bv_offset = 0;
start:		bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset,
				   size);
		if (base) {
			bv->bv_page = is_vmalloc_addr(base)
				? vmalloc_to_page(base)
				: virt_to_page(base);

			base += bv->bv_len;
		}

		size -= bv->bv_len;
	}
}
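/*
 * Usage note (a summary of the code above, not comment text from the
 * original file): callers set bio->bi_iter.bi_size first; passing a
 * kmalloc()/vmalloc() buffer as @base points each bvec at the buffer's
 * successive pages, while passing NULL only fills in bv_len/bv_offset so
 * that pages can be attached later, e.g. by bch_bio_alloc_pages() below.
 */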
/**
 * bch_bio_alloc_pages - allocates a single page for each bvec in a bio
 * @bio: bio to allocate pages for
 * @gfp_mask: flags for allocation
 *
 * Allocates pages up to @bio->bi_vcnt.
 *
 * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages
 * are freed.
 */
int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
{
	int i;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, bio, i) {
		bv->bv_page = alloc_page(gfp_mask);
		if (!bv->bv_page) {
			while (--bv >= bio->bi_io_vec)
				__free_page(bv->bv_page);
			return -ENOMEM;
		}
	}

	return 0;
}
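/*
 * A minimal usage sketch (an assumption about a typical caller, not code
 * from this file): for a bio initialized with room for nr_pages bvecs,
 * size the bio, let bch_bio_map() lay out empty bvecs, then back each one
 * with a freshly allocated page:
 *
 *	bio->bi_iter.bi_size = nr_pages << PAGE_SHIFT;
 *	bch_bio_map(bio, NULL);
 *	if (bch_bio_alloc_pages(bio, GFP_KERNEL))
 *		return -ENOMEM;
 */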