// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>
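/*
 * Option-string tables for sysfs files that expose an enumerated setting.
 * The show path prints the whole list with the current selection in
 * brackets; the store path matches the written string against the same
 * table (see the __sysfs_match_string() users below).
 */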
/* Default is -1; we skip past it for struct cached_dev's cache mode */
static const char * const bch_cache_modes[] = {

static const char * const bch_reada_cache_policies[] = {

/* Default is -1; we skip past it for stop_when_cache_set_failed */
static const char * const bch_stop_on_failure_modes[] = {

static const char * const cache_replacement_policies[] = {

static const char * const error_actions[] = {
write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);

sysfs_time_stats_attribute(btree_gc, sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort, ms, us);
sysfs_time_stats_attribute(btree_read, ms, us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(reclaim);
read_attribute(flush_write);
read_attribute(retry_flush_write);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(readahead_cache_policy);
rw_attribute(stop_when_cache_set_failed);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(io_disable);
rw_attribute(discard);
rw_attribute(running);

rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
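/*
 * The write_attribute()/read_attribute()/rw_attribute() helpers above
 * (from this driver's sysfs.h) declare "struct attribute" objects named
 * sysfs_<name> with write-only, read-only and read-write permissions
 * respectively; the attribute lists at the bottom of this file collect
 * them per kobject type.
 */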
static ssize_t bch_snprint_string_list(char *buf,
                size_t size,
                const char * const list[],
                size_t selected)
{
        char *out = buf;
        size_t i;

        for (i = 0; list[i]; i++)
                out += snprintf(out, buf + size - out,
                                i == selected ? "[%s] " : "%s ", list[i]);
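/*
 * SHOW() and STORE() (also from sysfs.h) expand to the sysfs ->show() and
 * ->store() callbacks for the kobject embedded in each bcache object; the
 * *_LOCKED variants wrap the __-prefixed implementations with
 * bch_register_lock.
 */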
SHOW(__bch_cached_dev)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);
        char const *states[] = { "no cache", "clean", "dirty", "inconsistent" };
        int wb = dc->writeback_running;
#define var(stat)		(dc->stat)

        if (attr == &sysfs_cache_mode)
                return bch_snprint_string_list(buf, PAGE_SIZE,
                                bch_cache_modes,
                                BDEV_CACHE_MODE(&dc->sb));

        if (attr == &sysfs_readahead_cache_policy)
                return bch_snprint_string_list(buf, PAGE_SIZE,
                                bch_reada_cache_policies,
                                dc->cache_readahead_policy);

        if (attr == &sysfs_stop_when_cache_set_failed)
                return bch_snprint_string_list(buf, PAGE_SIZE,
                                bch_stop_on_failure_modes,
                                dc->stop_when_cache_set_failed);

        sysfs_printf(data_csum, "%i", dc->disk.data_csum);
        var_printf(verify, "%i");
        var_printf(bypass_torture_test, "%i");
        var_printf(writeback_metadata, "%i");
        var_printf(writeback_running, "%i");
        var_print(writeback_delay);
        var_print(writeback_percent);
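        /*
         * The rate is maintained in sectors/sec; the << 9 below converts
         * it to bytes/sec for display, and 0 is reported while writeback
         * is not running.
         */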
        sysfs_hprint(writeback_rate,
                     wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
        sysfs_printf(io_errors, "%i", atomic_read(&dc->io_errors));
        sysfs_printf(io_error_limit, "%i", dc->error_limit);
        sysfs_printf(io_disable, "%i", dc->io_disable);
        var_print(writeback_rate_update_seconds);
        var_print(writeback_rate_i_term_inverse);
        var_print(writeback_rate_p_term_inverse);
        var_print(writeback_rate_minimum);

        if (attr == &sysfs_writeback_rate_debug) {
                char rate[20];
                char dirty[20];
                char target[20];
                char proportional[20];
                char integral[20];
                char change[20];
                s64 next_io;

                /*
                 * Except for dirty and target, other values should
                 * be 0 if writeback is not running.
                 */
                bch_hprint(rate,
                           wb ? atomic_long_read(&dc->writeback_rate.rate) << 9
                              : 0);
                bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
                bch_hprint(target, dc->writeback_rate_target << 9);
                bch_hprint(proportional,
                           wb ? dc->writeback_rate_proportional << 9 : 0);
                bch_hprint(integral,
                           wb ? dc->writeback_rate_integral_scaled << 9 : 0);
                bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0);
                next_io = wb ? div64_s64(dc->writeback_rate.next-local_clock(),
                                         NSEC_PER_MSEC) : 0;

                return sprintf(buf,
                               "rate:\t\t%s/sec\n"
                               "dirty:\t\t%s\n"
                               "target:\t\t%s\n"
                               "proportional:\t%s\n"
                               "integral:\t%s\n"
                               "change:\t\t%s/sec\n"
                               "next io:\t%llims\n",
                               rate, dirty, target, proportional,
                               integral, change, next_io);
        }
        sysfs_hprint(dirty_data,
                     bcache_dev_sectors_dirty(&dc->disk) << 9);

        sysfs_hprint(stripe_size, ((uint64_t)dc->disk.stripe_size) << 9);
        var_printf(partial_stripes_expensive, "%u");

        var_hprint(sequential_cutoff);
        var_hprint(readahead);

        sysfs_print(running, atomic_read(&dc->running));
        sysfs_print(state, states[BDEV_STATE(&dc->sb)]);
        if (attr == &sysfs_label) {
                memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
                buf[SB_LABEL_SIZE] = '\0';
                strcat(buf, "\n");
                return strlen(buf);
        }
SHOW_LOCKED(bch_cached_dev)

STORE(__cached_dev)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);
        ssize_t v;
        struct cache_set *c;
        struct kobj_uevent_env *env;
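/*
 * Store-path helpers: parse the user-supplied string and assign the
 * result to the cached_dev field of the same name.
 */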
#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)

        sysfs_strtoul(data_csum, dc->disk.data_csum);
        d_strtoul(bypass_torture_test);
        d_strtoul(writeback_metadata);
        d_strtoul(writeback_running);
        d_strtoul(writeback_delay);

        sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);
        if (attr == &sysfs_writeback_rate) {
                long int v = atomic_long_read(&dc->writeback_rate.rate);

                ret = strtoul_safe_clamp(buf, v, 1, INT_MAX);

                atomic_long_set(&dc->writeback_rate.rate, v);
        sysfs_strtoul_clamp(writeback_rate_update_seconds,
                            dc->writeback_rate_update_seconds,
                            1, WRITEBACK_RATE_UPDATE_SECS_MAX);
        sysfs_strtoul_clamp(writeback_rate_i_term_inverse,
                            dc->writeback_rate_i_term_inverse,
                            1, UINT_MAX);
        sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
                            dc->writeback_rate_p_term_inverse,
                            1, UINT_MAX);
        sysfs_strtoul_clamp(writeback_rate_minimum,
                            dc->writeback_rate_minimum,
                            1, UINT_MAX);

        sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);
        if (attr == &sysfs_io_disable) {
                int v = strtoul_or_return(buf);

                dc->io_disable = v ? 1 : 0;
        }

        sysfs_strtoul_clamp(sequential_cutoff,
                            dc->sequential_cutoff,
                            0, UINT_MAX);
        d_strtoi_h(readahead);
        if (attr == &sysfs_clear_stats)
                bch_cache_accounting_clear(&dc->accounting);

        if (attr == &sysfs_running &&
            strtoul_or_return(buf))
                bch_cached_dev_run(dc);
        if (attr == &sysfs_cache_mode) {
                v = __sysfs_match_string(bch_cache_modes, -1, buf);
                if (v < 0)
                        return v;

                if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
                        SET_BDEV_CACHE_MODE(&dc->sb, v);
                        bch_write_bdev_super(dc, NULL);
                }
        }
        if (attr == &sysfs_readahead_cache_policy) {
                v = __sysfs_match_string(bch_reada_cache_policies, -1, buf);
                if (v < 0)
                        return v;

                if ((unsigned int) v != dc->cache_readahead_policy)
                        dc->cache_readahead_policy = v;
        }
        if (attr == &sysfs_stop_when_cache_set_failed) {
                v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
                if (v < 0)
                        return v;
                dc->stop_when_cache_set_failed = v;
        }
        if (attr == &sysfs_label) {
                if (size > SB_LABEL_SIZE)
                        return -EINVAL;
                memcpy(dc->sb.label, buf, size);
                if (size < SB_LABEL_SIZE)
                        dc->sb.label[size] = '\0';
                if (size && dc->sb.label[size - 1] == '\n')
                        dc->sb.label[size - 1] = '\0';
                bch_write_bdev_super(dc, NULL);
                if (dc->disk.c) {
                        memcpy(dc->disk.c->uuids[dc->disk.id].label,
                               buf, SB_LABEL_SIZE);
                        bch_uuid_write(dc->disk.c);
                }
                env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
                if (!env)
                        return -ENOMEM;
                add_uevent_var(env, "DRIVER=bcache");
                add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
                add_uevent_var(env, "CACHED_LABEL=%s", buf);
                kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
                                   KOBJ_CHANGE, env->envp);
                kfree(env);
        }
        if (attr == &sysfs_attach) {
                uint8_t set_uuid[16];

                if (bch_parse_uuid(buf, set_uuid) < 16)
                        return -EINVAL;

                list_for_each_entry(c, &bch_cache_sets, list) {
                        v = bch_cached_dev_attach(dc, c, set_uuid);
                        if (!v)
                                return size;
                }

                pr_err("Can't attach %s: cache set not found", buf);
        if (attr == &sysfs_detach && dc->disk.c)
                bch_cached_dev_detach(dc);

        if (attr == &sysfs_stop)
                bcache_device_stop(&dc->disk);
STORE(bch_cached_dev)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);

        mutex_lock(&bch_register_lock);
        size = __cached_dev_store(kobj, attr, buf, size);
        if (attr == &sysfs_writeback_running)
                bch_writeback_queue(dc);

        /*
         * Only set BCACHE_DEV_WB_RUNNING when the cached device is attached
         * to a cache set; otherwise it does not make sense.
         */
        if (attr == &sysfs_writeback_percent)
                if ((dc->disk.c != NULL) &&
                    (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)))
                        schedule_delayed_work(&dc->writeback_rate_update,
                                        dc->writeback_rate_update_seconds * HZ);

        mutex_unlock(&bch_register_lock);

        return size;
}
static struct attribute *bch_cached_dev_files[] = {

        &sysfs_readahead_cache_policy,
        &sysfs_stop_when_cache_set_failed,
        &sysfs_writeback_metadata,
        &sysfs_writeback_running,
        &sysfs_writeback_delay,
        &sysfs_writeback_percent,
        &sysfs_writeback_rate,
        &sysfs_writeback_rate_update_seconds,
        &sysfs_writeback_rate_i_term_inverse,
        &sysfs_writeback_rate_p_term_inverse,
        &sysfs_writeback_rate_minimum,
        &sysfs_writeback_rate_debug,

        &sysfs_io_error_limit,

        &sysfs_partial_stripes_expensive,
        &sysfs_sequential_cutoff,

#ifdef CONFIG_BCACHE_DEBUG

        &sysfs_bypass_torture_test,
#endif
        NULL
};
KTYPE(bch_cached_dev);
SHOW(__bch_flash_dev)
{
        struct bcache_device *d = container_of(kobj, struct bcache_device,
                                               kobj);
        struct uuid_entry *u = &d->c->uuids[d->id];

        sysfs_printf(data_csum, "%i", d->data_csum);
        sysfs_hprint(size, u->sectors << 9);

        if (attr == &sysfs_label) {
                memcpy(buf, u->label, SB_LABEL_SIZE);
                buf[SB_LABEL_SIZE] = '\0';
                strcat(buf, "\n");
                return strlen(buf);
        }
STORE(__bch_flash_dev)
{
        struct bcache_device *d = container_of(kobj, struct bcache_device,
                                               kobj);
        struct uuid_entry *u = &d->c->uuids[d->id];

        sysfs_strtoul(data_csum, d->data_csum);

        if (attr == &sysfs_size) {
                uint64_t v;

                strtoi_h_or_return(buf, v);

                u->sectors = v >> 9;
                bch_uuid_write(d->c);
                set_capacity(d->disk, u->sectors);
        }

        if (attr == &sysfs_label) {
                memcpy(u->label, buf, SB_LABEL_SIZE);
                bch_uuid_write(d->c);
        }

        if (attr == &sysfs_unregister) {
                set_bit(BCACHE_DEV_DETACHING, &d->flags);
                bcache_device_stop(d);
        }
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {

KTYPE(bch_flash_dev);
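/*
 * Support for the bset_tree_stats file: walk every btree node and
 * accumulate per-bset statistics.
 */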
struct bset_stats_op {
        struct btree_op op;
        size_t nodes;
        struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
        struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

        op->nodes++;
        bch_btree_keys_stats(&b->keys, &op->stats);
static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
        struct bset_stats_op op;
        int ret;

        memset(&op, 0, sizeof(op));
        bch_btree_op_init(&op.op, -1);

        ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
        if (ret < 0)
                return ret;

        return snprintf(buf, PAGE_SIZE,
                        "btree nodes: %zu\n"
                        "written sets: %zu\n"
                        "unwritten sets: %zu\n"
                        "written key bytes: %zu\n"
                        "unwritten key bytes: %zu\n"
                        "floats: %zu\n"
                        "failed: %zu\n",
                        op.nodes,
                        op.stats.sets_written, op.stats.sets_unwritten,
                        op.stats.bytes_written, op.stats.bytes_unwritten,
                        op.stats.floats, op.stats.failed);
}
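/*
 * Percentage of the root btree node occupied by live keys: take a read
 * lock on the current root (retrying if the root changes underneath us)
 * and sum the bytes of its good keys.
 */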
static unsigned int bch_root_usage(struct cache_set *c)
{
        unsigned int bytes = 0;
        struct btree_iter iter;

                rw_lock(false, b, b->level);
        } while (b != c->root);

        for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
                bytes += bkey_bytes(k);

        return (bytes * 100) / btree_bytes(c);
}
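/* Total bytes of memory currently used by the in-memory btree node cache. */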
static size_t bch_cache_size(struct cache_set *c)
{
        size_t ret = 0;
        struct btree *b;

        mutex_lock(&c->bucket_lock);
        list_for_each_entry(b, &c->btree_cache, list)
                ret += 1 << (b->keys.page_order + PAGE_SHIFT);

        mutex_unlock(&c->bucket_lock);
        return ret;
}
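/*
 * Longest collision chain in the btree node hash table (c->bucket_hash),
 * exported as btree_cache_max_chain.
 */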
static unsigned int bch_cache_max_chain(struct cache_set *c)
{
        unsigned int ret = 0;
        struct hlist_head *h;

        mutex_lock(&c->bucket_lock);

        for (h = c->bucket_hash;
             h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
             h++) {
                unsigned int i = 0;
                struct hlist_node *p;

                hlist_for_each(p, h)
                        i++;

                ret = max(ret, i);
        }

        mutex_unlock(&c->bucket_lock);
        return ret;
}
static unsigned int bch_btree_used(struct cache_set *c)
{
        return div64_u64(c->gc_stats.key_bytes * 100,
                         (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned int bch_average_key_size(struct cache_set *c)
{
        return c->gc_stats.nkeys
                ? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
                : 0;
}
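/*
 * Show handler for a cache set's top-level sysfs directory
 * (/sys/fs/bcache/<set-uuid>/).
 */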
SHOW(__bch_cache_set)
{
        struct cache_set *c = container_of(kobj, struct cache_set, kobj);

        sysfs_print(synchronous, CACHE_SYNC(&c->sb));
        sysfs_print(journal_delay_ms, c->journal_delay_ms);
        sysfs_hprint(bucket_size, bucket_bytes(c));
        sysfs_hprint(block_size, block_bytes(c));
        sysfs_print(tree_depth, c->root->level);
        sysfs_print(root_usage_percent, bch_root_usage(c));

        sysfs_hprint(btree_cache_size, bch_cache_size(c));
        sysfs_print(btree_cache_max_chain, bch_cache_max_chain(c));
        sysfs_print(cache_available_percent, 100 - c->gc_stats.in_use);
        sysfs_print_time_stats(&c->btree_gc_time, btree_gc, sec, ms);
        sysfs_print_time_stats(&c->btree_split_time, btree_split, sec, us);
        sysfs_print_time_stats(&c->sort.time, btree_sort, ms, us);
        sysfs_print_time_stats(&c->btree_read_time, btree_read, ms, us);

        sysfs_print(btree_used_percent, bch_btree_used(c));
        sysfs_print(btree_nodes, c->gc_stats.nodes);
        sysfs_hprint(average_key_size, bch_average_key_size(c));

        sysfs_print(cache_read_races,
                    atomic_long_read(&c->cache_read_races));
        sysfs_print(reclaim,
                    atomic_long_read(&c->reclaim));

        sysfs_print(flush_write,
                    atomic_long_read(&c->flush_write));

        sysfs_print(retry_flush_write,
                    atomic_long_read(&c->retry_flush_write));

        sysfs_print(writeback_keys_done,
                    atomic_long_read(&c->writeback_keys_done));
        sysfs_print(writeback_keys_failed,
                    atomic_long_read(&c->writeback_keys_failed));
        if (attr == &sysfs_errors)
                return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
                                               c->on_error);

        /* See count_io_errors for why 88 */
        sysfs_print(io_error_halflife, c->error_decay * 88);
        sysfs_print(io_error_limit, c->error_limit);
        sysfs_hprint(congested,
                     ((uint64_t) bch_get_congested(c)) << 9);
        sysfs_print(congested_read_threshold_us,
                    c->congested_read_threshold_us);
        sysfs_print(congested_write_threshold_us,
                    c->congested_write_threshold_us);

        sysfs_print(active_journal_entries, fifo_used(&c->journal.pin));
        sysfs_printf(verify, "%i", c->verify);
        sysfs_printf(key_merging_disabled, "%i", c->key_merging_disabled);
        sysfs_printf(expensive_debug_checks,
                     "%i", c->expensive_debug_checks);
        sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite);
        sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled);
        sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);
        sysfs_printf(io_disable, "%i",
                     test_bit(CACHE_SET_IO_DISABLE, &c->flags));

        if (attr == &sysfs_bset_tree_stats)
                return bch_bset_print_stats(c, buf);
SHOW_LOCKED(bch_cache_set)

STORE(__bch_cache_set)
{
        struct cache_set *c = container_of(kobj, struct cache_set, kobj);
        ssize_t v;
        if (attr == &sysfs_unregister)
                bch_cache_set_unregister(c);

        if (attr == &sysfs_stop)
                bch_cache_set_stop(c);
        if (attr == &sysfs_synchronous) {
                bool sync = strtoul_or_return(buf);

                if (sync != CACHE_SYNC(&c->sb)) {
                        SET_CACHE_SYNC(&c->sb, sync);
                        bcache_write_super(c);
                }
        }
        if (attr == &sysfs_flash_vol_create) {
                int r;
                uint64_t v;

                strtoi_h_or_return(buf, v);

                r = bch_flash_dev_create(c, v);
        if (attr == &sysfs_clear_stats) {
                atomic_long_set(&c->writeback_keys_done, 0);
                atomic_long_set(&c->writeback_keys_failed, 0);

                memset(&c->gc_stats, 0, sizeof(struct gc_stat));
                bch_cache_accounting_clear(&c->accounting);
        }
        if (attr == &sysfs_trigger_gc) {
                /*
                 * Garbage collection thread only works when sectors_to_gc < 0,
                 * so when users write to the trigger_gc sysfs entry they most
                 * likely want to forcibly trigger garbage collection. Setting
                 * c->sectors_to_gc to -1 here gives gc_should_run() a chance
                 * to let the gc thread run. "A chance" because, before
                 * gc_should_run() is reached, c->sectors_to_gc may already
                 * have been set to another positive value, so writing to
                 * trigger_gc does not guarantee that the gc thread runs.
                 */
                atomic_set(&c->sectors_to_gc, -1);
                wake_up_gc(c);
        }
        if (attr == &sysfs_prune_cache) {
                struct shrink_control sc;

                sc.gfp_mask = GFP_KERNEL;
                sc.nr_to_scan = strtoul_or_return(buf);
                c->shrink.scan_objects(&c->shrink, &sc);
        }
        sysfs_strtoul(congested_read_threshold_us,
                      c->congested_read_threshold_us);
        sysfs_strtoul(congested_write_threshold_us,
                      c->congested_write_threshold_us);
        if (attr == &sysfs_errors) {
                v = __sysfs_match_string(error_actions, -1, buf);
                if (v < 0)
                        return v;
                c->on_error = v;
        }

        if (attr == &sysfs_io_error_limit)
                c->error_limit = strtoul_or_return(buf);
        /* See count_io_errors() for why 88 */
        if (attr == &sysfs_io_error_halflife) {

                ret = strtoul_safe_clamp(buf, v, 0, UINT_MAX);

                c->error_decay = v / 88;
        if (attr == &sysfs_io_disable) {
                v = strtoul_or_return(buf);
                if (v) {
                        if (test_and_set_bit(CACHE_SET_IO_DISABLE,
                                             &c->flags))
                                pr_warn("CACHE_SET_IO_DISABLE already set");
                } else {
                        if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
                                                &c->flags))
                                pr_warn("CACHE_SET_IO_DISABLE already cleared");
                }
        }
        sysfs_strtoul(journal_delay_ms, c->journal_delay_ms);
        sysfs_strtoul(verify, c->verify);
        sysfs_strtoul(key_merging_disabled, c->key_merging_disabled);
        sysfs_strtoul(expensive_debug_checks, c->expensive_debug_checks);
        sysfs_strtoul(gc_always_rewrite, c->gc_always_rewrite);
        sysfs_strtoul(btree_shrinker_disabled, c->shrinker_disabled);
        sysfs_strtoul(copy_gc_enabled, c->copy_gc_enabled);

        return size;
}
STORE_LOCKED(bch_cache_set)
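/*
 * The "internal" subdirectory shares the cache set's show/store code;
 * its kobject is embedded in struct cache_set, so these wrappers simply
 * redirect to the main kobject's handlers.
 */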
SHOW(bch_cache_set_internal)
{
        struct cache_set *c = container_of(kobj, struct cache_set, internal);
        return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
        struct cache_set *c = container_of(kobj, struct cache_set, internal);
        return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}
static struct attribute *bch_cache_set_files[] = {

        &sysfs_journal_delay_ms,
        &sysfs_flash_vol_create,

        &sysfs_root_usage_percent,
        &sysfs_btree_cache_size,
        &sysfs_cache_available_percent,

        &sysfs_average_key_size,

        &sysfs_io_error_limit,
        &sysfs_io_error_halflife,

        &sysfs_congested_read_threshold_us,
        &sysfs_congested_write_threshold_us,

        NULL
};
KTYPE(bch_cache_set);
static struct attribute *bch_cache_set_internal_files[] = {
        &sysfs_active_journal_entries,

        sysfs_time_stats_attribute_list(btree_gc, sec, ms)
        sysfs_time_stats_attribute_list(btree_split, sec, us)
        sysfs_time_stats_attribute_list(btree_sort, ms, us)
        sysfs_time_stats_attribute_list(btree_read, ms, us)

        &sysfs_btree_used_percent,
        &sysfs_btree_cache_max_chain,

        &sysfs_bset_tree_stats,
        &sysfs_cache_read_races,

        &sysfs_retry_flush_write,
        &sysfs_writeback_keys_done,
        &sysfs_writeback_keys_failed,

#ifdef CONFIG_BCACHE_DEBUG

        &sysfs_key_merging_disabled,
        &sysfs_expensive_debug_checks,
#endif
        &sysfs_gc_always_rewrite,
        &sysfs_btree_shrinker_disabled,
        &sysfs_copy_gc_enabled,

        NULL
};
KTYPE(bch_cache_set_internal);
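/* Descending-order comparator for sort()ing the copied bucket priorities. */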
static int __bch_cache_cmp(const void *l, const void *r)
{
        return *((uint16_t *)r) - *((uint16_t *)l);
}
SHOW(__bch_cache)
{
        struct cache *ca = container_of(kobj, struct cache, kobj);

        sysfs_hprint(bucket_size, bucket_bytes(ca));
        sysfs_hprint(block_size, block_bytes(ca));
        sysfs_print(nbuckets, ca->sb.nbuckets);
        sysfs_print(discard, ca->discard);
        sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
        sysfs_hprint(btree_written,
                     atomic_long_read(&ca->btree_sectors_written) << 9);
        sysfs_hprint(metadata_written,
                     (atomic_long_read(&ca->meta_sectors_written) +
                      atomic_long_read(&ca->btree_sectors_written)) << 9);

        sysfs_print(io_errors,
                    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);
        if (attr == &sysfs_cache_replacement_policy)
                return bch_snprint_string_list(buf, PAGE_SIZE,
                                cache_replacement_policies,
                                CACHE_REPLACEMENT(&ca->sb));
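        /*
         * priority_stats: snapshot every bucket's priority under
         * bucket_lock, classify buckets by their GC mark, then sort the
         * priorities and print summary percentages plus 31 quantiles of
         * bucket "freshness" (INITIAL_PRIO - prio).
         */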
        if (attr == &sysfs_priority_stats) {
                struct bucket *b;
                size_t n = ca->sb.nbuckets, i;
                size_t unused = 0, available = 0, dirty = 0, meta = 0;
                uint64_t sum = 0;
                /* Compute 31 quantiles */
                uint16_t q[31], *p, *cached;
                ssize_t ret;

                cached = p = vmalloc(array_size(sizeof(uint16_t),
                                                ca->sb.nbuckets));
                if (!p)
                        return -ENOMEM;

                mutex_lock(&ca->set->bucket_lock);
                for_each_bucket(b, ca) {
                        if (!GC_SECTORS_USED(b))
                                unused++;
                        if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
                                available++;
                        if (GC_MARK(b) == GC_MARK_DIRTY)
                                dirty++;
                        if (GC_MARK(b) == GC_MARK_METADATA)
                                meta++;
                }

                for (i = ca->sb.first_bucket; i < n; i++)
                        p[i] = ca->buckets[i].prio;
                mutex_unlock(&ca->set->bucket_lock);

                sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);

                while (n &&
                       !cached[n - 1])
                        --n;

                unused = ca->sb.nbuckets - n;

                while (cached < p + n &&
                       *cached == BTREE_PRIO)
                        cached++, n--;

                for (i = 0; i < n; i++)
                        sum += INITIAL_PRIO - cached[i];

                if (n)
                        sum = div64_u64(sum, n);

                for (i = 0; i < ARRAY_SIZE(q); i++)
                        q[i] = INITIAL_PRIO - cached[n * (i + 1) /
                                (ARRAY_SIZE(q) + 1)];

                vfree(p);

                ret = scnprintf(buf, PAGE_SIZE,
                                "Unused: %zu%%\n"
                                "Clean: %zu%%\n"
                                "Dirty: %zu%%\n"
                                "Metadata: %zu%%\n"
                                "Average: %llu\n"
                                "Sectors per Q: %zu\n"
                                "Quantiles: [",
                                unused * 100 / (size_t) ca->sb.nbuckets,
                                available * 100 / (size_t) ca->sb.nbuckets,
                                dirty * 100 / (size_t) ca->sb.nbuckets,
                                meta * 100 / (size_t) ca->sb.nbuckets, sum,
                                n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

                for (i = 0; i < ARRAY_SIZE(q); i++)
                        ret += scnprintf(buf + ret, PAGE_SIZE - ret,
                                         "%u ", q[i]);

                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

                return ret;
        }
SHOW_LOCKED(bch_cache)

STORE(__bch_cache)
{
        struct cache *ca = container_of(kobj, struct cache, kobj);
        ssize_t v;

        if (attr == &sysfs_discard) {
                bool v = strtoul_or_return(buf);

                if (blk_queue_discard(bdev_get_queue(ca->bdev)))
                        ca->discard = v;

                if (v != CACHE_DISCARD(&ca->sb)) {
                        SET_CACHE_DISCARD(&ca->sb, v);
                        bcache_write_super(ca->set);
                }
        }
        if (attr == &sysfs_cache_replacement_policy) {
                v = __sysfs_match_string(cache_replacement_policies, -1, buf);
                if (v < 0)
                        return v;

                if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
                        mutex_lock(&ca->set->bucket_lock);
                        SET_CACHE_REPLACEMENT(&ca->sb, v);
                        mutex_unlock(&ca->set->bucket_lock);

                        bcache_write_super(ca->set);
                }
        }
        if (attr == &sysfs_clear_stats) {
                atomic_long_set(&ca->sectors_written, 0);
                atomic_long_set(&ca->btree_sectors_written, 0);
                atomic_long_set(&ca->meta_sectors_written, 0);
                atomic_set(&ca->io_count, 0);
                atomic_set(&ca->io_errors, 0);
        }

        return size;
}
STORE_LOCKED(bch_cache)
static struct attribute *bch_cache_files[] = {

        &sysfs_priority_stats,

        &sysfs_btree_written,
        &sysfs_metadata_written,

        &sysfs_cache_replacement_policy,