/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>

static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

static const char * const error_actions[] = {
	"unregister",
	"panic",
	NULL
};
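
/*
 * write_attribute()/read_attribute()/rw_attribute() (from sysfs.h) each
 * declare a "struct attribute sysfs_<name>" with the matching permission
 * bits; the show/store handlers below dispatch on these pointers.
 */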

write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);

sysfs_time_stats_attribute(btree_gc, sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort, ms, us);
sysfs_time_stats_attribute(btree_read, ms, us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_d_term);
rw_attribute(writeback_rate_p_term_inverse);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(size);
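
/*
 * The SHOW()/STORE() macros define kobject show/store methods; the
 * *_LOCKED variants wrap them in bch_register_lock. In a typical setup
 * these attributes appear under e.g. /sys/block/bcache0/bcache/ (backing
 * devices) and /sys/fs/bcache/<set-uuid>/ (cache sets); exact paths vary.
 */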

SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	const char *states[] = { "no cache", "clean", "dirty", "inconsistent" };

#define var(stat)	(dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes + 1,
					       BDEV_CACHE_MODE(&dc->sb));

	sysfs_printf(data_csum, "%i", dc->disk.data_csum);
	var_printf(verify, "%i");
	var_printf(bypass_torture_test, "%i");
	var_printf(writeback_metadata, "%i");
	var_printf(writeback_running, "%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_hprint(writeback_rate, dc->writeback_rate.rate << 9);

	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_d_term);
	var_print(writeback_rate_p_term_inverse);
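
	/*
	 * writeback_rate_debug summarizes the writeback rate controller:
	 * current rate, dirty data, target, and the proportional/derivative
	 * terms, converted from sectors to bytes (<< 9) via bch_hprint().
	 */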
	if (attr == &sysfs_writeback_rate_debug) {
		char rate[20];
		char dirty[20];
		char target[20];
		char proportional[20];
		char derivative[20];
		char change[20];
		s64 next_io;

		bch_hprint(rate, dc->writeback_rate.rate << 9);
		bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(target, dc->writeback_rate_target << 9);
		bch_hprint(proportional, dc->writeback_rate_proportional << 9);
		bch_hprint(derivative, dc->writeback_rate_derivative << 9);
		bch_hprint(change, dc->writeback_rate_change << 9);

		next_io = div64_s64(dc->writeback_rate.next - local_clock(),
				    NSEC_PER_MSEC);

		return sprintf(buf,
			       "rate:\t\t%s/sec\n"
			       "dirty:\t\t%s\n"
			       "target:\t\t%s\n"
			       "proportional:\t%s\n"
			       "derivative:\t%s\n"
			       "change:\t\t%s/sec\n"
			       "next io:\t%llims\n",
			       rate, dirty, target, proportional,
			       derivative, change, next_io);
	}

	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	sysfs_hprint(stripe_size, dc->disk.stripe_size << 9);
	var_printf(partial_stripes_expensive, "%u");

	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running, atomic_read(&dc->running));
	sysfs_print(state, states[BDEV_STATE(&dc->sb)]);

	if (attr == &sysfs_label) {
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)

STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	ssize_t v = size;
	struct cache_set *c;
	struct kobj_uevent_env *env;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)
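
	/*
	 * Each helper below parses the written string into the named
	 * cached_dev field and returns early when this store targets the
	 * corresponding attribute.
	 */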
	sysfs_strtoul(data_csum, dc->disk.data_csum);
	d_strtoul(verify);
	d_strtoul(bypass_torture_test);
	d_strtoul(writeback_metadata);
	d_strtoul(writeback_running);
	d_strtoul(writeback_delay);

	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);

	sysfs_strtoul_clamp(writeback_rate,
			    dc->writeback_rate.rate, 1, INT_MAX);

	d_strtoul_nonzero(writeback_rate_update_seconds);
	d_strtoul(writeback_rate_d_term);
	d_strtoul_nonzero(writeback_rate_p_term_inverse);

	sysfs_strtoul_clamp(sequential_cutoff,
			    dc->sequential_cutoff,
			    0, UINT_MAX);
	d_strtoi_h(readahead);

	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf))
		bch_cached_dev_run(dc);

	if (attr == &sysfs_cache_mode) {
		v = bch_read_string_list(buf, bch_cache_modes + 1);

		if (v < 0)
			return v;

		if ((unsigned) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}
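
	/*
	 * A label write updates the backing device superblock and, if
	 * attached, the uuid entry in the cache set, then emits a
	 * KOBJ_CHANGE uevent so userspace sees the new CACHED_LABEL.
	 */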
	if (attr == &sysfs_label) {
		if (size > SB_LABEL_SIZE)
			return -EINVAL;
		memcpy(dc->sb.label, buf, size);
		if (size < SB_LABEL_SIZE)
			dc->sb.label[size] = '\0';
		if (size && dc->sb.label[size - 1] == '\n')
			dc->sb.label[size - 1] = '\0';
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}

		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
		if (!env)
			return -ENOMEM;
		add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
				   KOBJ_CHANGE, env->envp);
		kfree(env);
	}

	if (attr == &sysfs_attach) {
		uint8_t set_uuid[16];

		if (bch_parse_uuid(buf, set_uuid) < 16)
			return -EINVAL;

		v = -ENOENT;
		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c, set_uuid);
			if (!v)
				return size;
		}

		pr_err("Can't attach %s: cache set not found", buf);
		return v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}
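
/*
 * The locked store wrapper: serialize on bch_register_lock, then poke the
 * writeback machinery if one of its knobs changed.
 */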
STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running)
		bch_writeback_queue(dc);

	if (attr == &sysfs_writeback_percent)
		schedule_delayed_work(&dc->writeback_rate_update,
				      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}

static struct attribute *bch_cached_dev_files[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_cache_mode,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_d_term,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_debug,
	&sysfs_dirty_data,
	&sysfs_stripe_size,
	&sysfs_partial_stripes_expensive,
	&sysfs_sequential_cutoff,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
	&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_bypass_torture_test,
#endif
	NULL
};
KTYPE(bch_cached_dev);

SHOW(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum, "%i", d->data_csum);
	sysfs_hprint(size, u->sectors << 9);

	if (attr == &sysfs_label) {
		memcpy(buf, u->label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}
SHOW_LOCKED(bch_flash_dev)

STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_strtoul(data_csum, d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;
		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		set_bit(BCACHE_DEV_DETACHING, &d->flags);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);
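
/*
 * bset_tree_stats plumbing: map over every btree node, accumulating
 * statistics on the per-node auxiliary search trees via
 * bch_btree_keys_stats().
 */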
struct bset_stats_op {
	struct btree_op op;
	size_t nodes;
	struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
	struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

	op->nodes++;
	bch_btree_keys_stats(&b->keys, &op->stats);

	return MAP_CONTINUE;
}

static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
	struct bset_stats_op op;
	int ret;

	memset(&op, 0, sizeof(op));
	bch_btree_op_init(&op.op, -1);

	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
	if (ret < 0)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"btree nodes: %zu\n"
			"written sets: %zu\n"
			"unwritten sets: %zu\n"
			"written key bytes: %zu\n"
			"unwritten key bytes: %zu\n"
			"floats: %zu\n"
			"failed: %zu\n",
			op.nodes,
			op.stats.sets_written, op.stats.sets_unwritten,
			op.stats.bytes_written, op.stats.bytes_unwritten,
			op.stats.floats, op.stats.failed);
}
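
/*
 * Percentage of the root node occupied by live keys. The lock_root loop
 * retries until c->root is stable, since the root may be replaced while
 * we sleep on its lock.
 */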
static unsigned bch_root_usage(struct cache_set *c)
{
	unsigned bytes = 0;
	struct bkey *k;
	struct btree *b;
	struct btree_iter iter;

	goto lock_root;

	do {
		rw_unlock(false, b);
lock_root:
		b = c->root;
		rw_lock(false, b, b->level);
	} while (b != c->root);

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		bytes += bkey_bytes(k);

	rw_unlock(false, b);

	return (bytes * 100) / btree_bytes(c);
}
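
/* Bytes of memory currently held by the in-memory btree node cache */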
static size_t bch_cache_size(struct cache_set *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->bucket_lock);
	list_for_each_entry(b, &c->btree_cache, list)
		ret += 1 << (b->keys.page_order + PAGE_SHIFT);

	mutex_unlock(&c->bucket_lock);
	return ret;
}
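
/* Longest collision chain in the btree node cache hash table */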
static unsigned bch_cache_max_chain(struct cache_set *c)
{
	unsigned ret = 0;
	struct hlist_head *h;

	mutex_lock(&c->bucket_lock);

	for (h = c->bucket_hash;
	     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
	     h++) {
		unsigned i = 0;
		struct hlist_node *p;

		hlist_for_each(p, h)
			i++;

		ret = max(ret, i);
	}

	mutex_unlock(&c->bucket_lock);
	return ret;
}

static unsigned bch_btree_used(struct cache_set *c)
{
	return div64_u64(c->gc_stats.key_bytes * 100,
			 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}
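
/* Average amount of data per live key, from the last garbage collection */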
static unsigned bch_average_key_size(struct cache_set *c)
{
	return c->gc_stats.nkeys
		? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
		: 0;
}

SHOW(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous, CACHE_SYNC(&c->sb));
	sysfs_print(journal_delay_ms, c->journal_delay_ms);
	sysfs_hprint(bucket_size, bucket_bytes(c));
	sysfs_hprint(block_size, block_bytes(c));
	sysfs_print(tree_depth, c->root->level);
	sysfs_print(root_usage_percent, bch_root_usage(c));

	sysfs_hprint(btree_cache_size, bch_cache_size(c));
	sysfs_print(btree_cache_max_chain, bch_cache_max_chain(c));
	sysfs_print(cache_available_percent, 100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time, btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time, btree_split, sec, us);
	sysfs_print_time_stats(&c->sort.time, btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time, btree_read, ms, us);

	sysfs_print(btree_used_percent, bch_btree_used(c));
	sysfs_print(btree_nodes, c->gc_stats.nodes);
	sysfs_hprint(average_key_size, bch_average_key_size(c));

	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	if (attr == &sysfs_errors)
		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
					       c->on_error);

	/* See count_io_errors for why 88 */
	sysfs_print(io_error_halflife, c->error_decay * 88);
	sysfs_print(io_error_limit, c->error_limit >> IO_ERROR_SHIFT);

	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(active_journal_entries, fifo_used(&c->journal.pin));
	sysfs_printf(verify, "%i", c->verify);
	sysfs_printf(key_merging_disabled, "%i", c->key_merging_disabled);
	sysfs_printf(expensive_debug_checks,
		     "%i", c->expensive_debug_checks);
	sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);

	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	return 0;
}
SHOW_LOCKED(bch_cache_set)

STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->sb)) {
			SET_CACHE_SYNC(&c->sb, sync);
			bcache_write_super(c);
		}
	}

	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;
		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done, 0);
		atomic_long_set(&c->writeback_keys_failed, 0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}

	if (attr == &sysfs_trigger_gc)
		wake_up_gc(c);
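
	/*
	 * prune_cache: hand the written value to the btree node cache
	 * shrinker as nr_to_scan, forcing immediate reclaim.
	 */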
	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;
		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.scan_objects(&c->shrink, &sc);
	}

	sysfs_strtoul(congested_read_threshold_us,
		      c->congested_read_threshold_us);
	sysfs_strtoul(congested_write_threshold_us,
		      c->congested_write_threshold_us);

	if (attr == &sysfs_errors) {
		ssize_t v = bch_read_string_list(buf, error_actions);

		if (v < 0)
			return v;

		c->on_error = v;
	}

	if (attr == &sysfs_io_error_limit)
		c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT;

	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife) {
		unsigned long v = 0;
		ssize_t ret;

		ret = strtoul_safe_clamp(buf, v, 0, UINT_MAX);
		if (!ret) {
			c->error_decay = v / 88;
			return size;
		}
		return ret;
	}
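
	/*
	 * Where 88 comes from: each decay period scales the error count by
	 * 127/128, and log(1/2) / log(127/128) ~= 88 periods per halflife,
	 * so halflife ~= error_decay * 88 IOs.
	 */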

	sysfs_strtoul(journal_delay_ms, c->journal_delay_ms);
	sysfs_strtoul(verify, c->verify);
	sysfs_strtoul(key_merging_disabled, c->key_merging_disabled);
	sysfs_strtoul(expensive_debug_checks, c->expensive_debug_checks);
	sysfs_strtoul(gc_always_rewrite, c->gc_always_rewrite);
	sysfs_strtoul(btree_shrinker_disabled, c->shrinker_disabled);
	sysfs_strtoul(copy_gc_enabled, c->copy_gc_enabled);

	return size;
}
STORE_LOCKED(bch_cache_set)
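
/*
 * The cache_set "internal" kobject is a subdirectory for debugging
 * attributes; its show/store simply forward to the handlers above.
 */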
SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);
	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);
	return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_files[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,

	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
	&sysfs_expensive_debug_checks,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	NULL
};
KTYPE(bch_cache_set_internal);

SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size, bucket_bytes(ca));
	sysfs_hprint(block_size, block_bytes(ca));
	sysfs_print(nbuckets, ca->sb.nbuckets);
	sysfs_print(discard, ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));
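
	/*
	 * priority_stats scans every bucket and reports utilization plus a
	 * 31-point quantile summary of bucket priorities. cmp() relies on
	 * the GCC nested-function extension and sorts descending.
	 */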
	if (attr == &sysfs_priority_stats) {
		int cmp(const void *l, const void *r)
		{	return *((uint16_t *) r) - *((uint16_t *) l); }

		struct bucket *b;
		size_t n = ca->sb.nbuckets, i;
		size_t unused = 0, available = 0, dirty = 0, meta = 0;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(ca->sb.nbuckets * sizeof(uint16_t));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				unused++;
			if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
				available++;
			if (GC_MARK(b) == GC_MARK_DIRTY)
				dirty++;
			if (GC_MARK(b) == GC_MARK_METADATA)
				meta++;
		}

		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), cmp, NULL);

		while (n &&
		       !cached[n - 1])
			--n;

		unused = ca->sb.nbuckets - n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++, n--;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused: %zu%%\n"
				"Clean: %zu%%\n"
				"Dirty: %zu%%\n"
				"Metadata: %zu%%\n"
				"Average: %llu\n"
				"Sectors per Q: %zu\n"
				"Quantiles: [",
				unused * 100 / (size_t) ca->sb.nbuckets,
				available * 100 / (size_t) ca->sb.nbuckets,
				dirty * 100 / (size_t) ca->sb.nbuckets,
				meta * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		ret--;

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)

STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}
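
	/*
	 * ca->discard only takes effect when the backing queue supports
	 * discard, but the on-disk CACHE_DISCARD bit is updated either way
	 * so the setting persists.
	 */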
	if (attr == &sysfs_cache_replacement_policy) {
		ssize_t v = bch_read_string_list(buf, cache_replacement_policies);

		if (v < 0)
			return v;

		if ((unsigned) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)

static struct attribute *bch_cache_files[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_cache_replacement_policy,
	NULL
};
KTYPE(bch_cache);