GNU Linux-libre 4.14.332-gnu1
drivers/md/bcache/super.c
/*
 * bcache setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/debugfs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/sysfs.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");

static const char bcache_magic[] = {
        0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
        0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
};

static const char invalid_uuid[] = {
        0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
        0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
};

/* Default is -1; we skip past it for struct cached_dev's cache mode */
const char * const bch_cache_modes[] = {
        "default",
        "writethrough",
        "writeback",
        "writearound",
        "none",
        NULL
};
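/*
 * The strings above line up with the BDEV_CACHE_MODE() field kept in the
 * backing device superblock's flags (an assumption about bcache.h, where
 * that bitmask lives); "default" sits in slot 0 so sysfs code for cached
 * devices can print from bch_cache_modes + 1 and skip it - hence the
 * "skip past it" note above.
 */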
49
50 static struct kobject *bcache_kobj;
51 struct mutex bch_register_lock;
52 LIST_HEAD(bch_cache_sets);
53 static LIST_HEAD(uncached_devices);
54
55 static int bcache_major;
56 static DEFINE_IDA(bcache_minor);
57 static wait_queue_head_t unregister_wait;
58 struct workqueue_struct *bcache_wq;
59
60 #define BTREE_MAX_PAGES         (256 * 1024 / PAGE_SIZE)
61 #define BCACHE_MINORS           16 /* partition support */
62
63 /* Superblock */
64
65 static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
66                               struct page **res)
67 {
68         const char *err;
69         struct cache_sb *s;
70         struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
71         unsigned i;
72
73         if (!bh)
74                 return "IO error";
75
76         s = (struct cache_sb *) bh->b_data;
77
78         sb->offset              = le64_to_cpu(s->offset);
79         sb->version             = le64_to_cpu(s->version);
80
81         memcpy(sb->magic,       s->magic, 16);
82         memcpy(sb->uuid,        s->uuid, 16);
83         memcpy(sb->set_uuid,    s->set_uuid, 16);
84         memcpy(sb->label,       s->label, SB_LABEL_SIZE);
85
86         sb->flags               = le64_to_cpu(s->flags);
87         sb->seq                 = le64_to_cpu(s->seq);
88         sb->last_mount          = le32_to_cpu(s->last_mount);
89         sb->first_bucket        = le16_to_cpu(s->first_bucket);
90         sb->keys                = le16_to_cpu(s->keys);
91
92         for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
93                 sb->d[i] = le64_to_cpu(s->d[i]);
94
95         pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
96                  sb->version, sb->flags, sb->seq, sb->keys);
97
98         err = "Not a bcache superblock";
99         if (sb->offset != SB_SECTOR)
100                 goto err;
101
102         if (memcmp(sb->magic, bcache_magic, 16))
103                 goto err;
104
105         err = "Too many journal buckets";
106         if (sb->keys > SB_JOURNAL_BUCKETS)
107                 goto err;
108
109         err = "Bad checksum";
110         if (s->csum != csum_set(s))
111                 goto err;
112
113         err = "Bad UUID";
114         if (bch_is_zero(sb->uuid, 16))
115                 goto err;
116
117         sb->block_size  = le16_to_cpu(s->block_size);
118
119         err = "Superblock block size smaller than device block size";
120         if (sb->block_size << 9 < bdev_logical_block_size(bdev))
121                 goto err;
122
123         switch (sb->version) {
124         case BCACHE_SB_VERSION_BDEV:
125                 sb->data_offset = BDEV_DATA_START_DEFAULT;
126                 break;
127         case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
128                 sb->data_offset = le64_to_cpu(s->data_offset);
129
130                 err = "Bad data offset";
131                 if (sb->data_offset < BDEV_DATA_START_DEFAULT)
132                         goto err;
133
134                 break;
135         case BCACHE_SB_VERSION_CDEV:
136         case BCACHE_SB_VERSION_CDEV_WITH_UUID:
137                 sb->nbuckets    = le64_to_cpu(s->nbuckets);
138                 sb->bucket_size = le16_to_cpu(s->bucket_size);
139
140                 sb->nr_in_set   = le16_to_cpu(s->nr_in_set);
141                 sb->nr_this_dev = le16_to_cpu(s->nr_this_dev);
142
143                 err = "Too many buckets";
144                 if (sb->nbuckets > LONG_MAX)
145                         goto err;
146
147                 err = "Not enough buckets";
148                 if (sb->nbuckets < 1 << 7)
149                         goto err;
150
151                 err = "Bad block/bucket size";
152                 if (!is_power_of_2(sb->block_size) ||
153                     sb->block_size > PAGE_SECTORS ||
154                     !is_power_of_2(sb->bucket_size) ||
155                     sb->bucket_size < PAGE_SECTORS)
156                         goto err;
157
158                 err = "Invalid superblock: device too small";
159                 if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets)
160                         goto err;
161
162                 err = "Bad UUID";
163                 if (bch_is_zero(sb->set_uuid, 16))
164                         goto err;
165
166                 err = "Bad cache device number in set";
167                 if (!sb->nr_in_set ||
168                     sb->nr_in_set <= sb->nr_this_dev ||
169                     sb->nr_in_set > MAX_CACHES_PER_SET)
170                         goto err;
171
172                 err = "Journal buckets not sequential";
173                 for (i = 0; i < sb->keys; i++)
174                         if (sb->d[i] != sb->first_bucket + i)
175                                 goto err;
176
177                 err = "Too many journal buckets";
178                 if (sb->first_bucket + sb->keys > sb->nbuckets)
179                         goto err;
180
181                 err = "Invalid superblock: first bucket comes before end of super";
182                 if (sb->first_bucket * sb->bucket_size < 16)
183                         goto err;
184
185                 break;
186         default:
187                 err = "Unsupported superblock version";
188                 goto err;
189         }
190
191         sb->last_mount = get_seconds();
192         err = NULL;
193
194         get_page(bh->b_page);
195         *res = bh->b_page;
196 err:
197         put_bh(bh);
198         return err;
199 }
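/*
 * read_super() follows the error convention used throughout this file: a
 * NULL return means success, anything else is a pointer to a static,
 * human-readable error string (so callers never need to free it). A
 * minimal caller sketch, assuming bdev was opened the way the
 * registration path later in this driver opens it:
 *
 *      struct cache_sb sb;
 *      struct page *sb_page = NULL;
 *      const char *err = read_super(&sb, bdev, &sb_page);
 *
 *      if (err) {
 *              pr_err("register failed: %s", err);
 *              return;
 *      }
 *      ...
 *      put_page(sb_page);      // read_super() took its own page ref
 */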
static void write_bdev_super_endio(struct bio *bio)
{
        struct cached_dev *dc = bio->bi_private;
        /* XXX: error checking */

        closure_put(&dc->sb_write);
}

static void __write_super(struct cache_sb *sb, struct bio *bio)
{
        struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
        unsigned i;

        bio->bi_iter.bi_sector  = SB_SECTOR;
        bio->bi_iter.bi_size    = SB_SIZE;
        bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
        bch_bio_map(bio, NULL);

        out->offset             = cpu_to_le64(sb->offset);
        out->version            = cpu_to_le64(sb->version);

        memcpy(out->uuid,       sb->uuid, 16);
        memcpy(out->set_uuid,   sb->set_uuid, 16);
        memcpy(out->label,      sb->label, SB_LABEL_SIZE);

        out->flags              = cpu_to_le64(sb->flags);
        out->seq                = cpu_to_le64(sb->seq);

        out->last_mount         = cpu_to_le32(sb->last_mount);
        out->first_bucket       = cpu_to_le16(sb->first_bucket);
        out->keys               = cpu_to_le16(sb->keys);

        for (i = 0; i < sb->keys; i++)
                out->d[i] = cpu_to_le64(sb->d[i]);

        out->csum = csum_set(out);

        pr_debug("ver %llu, flags %llu, seq %llu",
                 sb->version, sb->flags, sb->seq);

        submit_bio(bio);
}

static void bch_write_bdev_super_unlock(struct closure *cl)
{
        struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);

        up(&dc->sb_write_mutex);
}

void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
{
        struct closure *cl = &dc->sb_write;
        struct bio *bio = &dc->sb_bio;

        down(&dc->sb_write_mutex);
        closure_init(cl, parent);

        bio_reset(bio);
        bio_set_dev(bio, dc->bdev);
        bio->bi_end_io  = write_bdev_super_endio;
        bio->bi_private = dc;

        closure_get(cl);
        __write_super(&dc->sb, bio);

        closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
}
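/*
 * The pattern above recurs for every piece of metadata in this file: take
 * a semaphore to serialize writers, hang a closure off the caller's
 * parent closure, fire the bio, and release the semaphore from the
 * closure destructor once the endio handler drops the last ref. A caller
 * that needs the write to be durable before proceeding does, as several
 * sites later in this file:
 *
 *      struct closure cl;
 *
 *      closure_init_stack(&cl);
 *      bch_write_bdev_super(dc, &cl);
 *      closure_sync(&cl);      // wait for write_bdev_super_endio()
 */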
static void write_super_endio(struct bio *bio)
{
        struct cache *ca = bio->bi_private;

        bch_count_io_errors(ca, bio->bi_status, "writing superblock");
        closure_put(&ca->set->sb_write);
}

static void bcache_write_super_unlock(struct closure *cl)
{
        struct cache_set *c = container_of(cl, struct cache_set, sb_write);

        up(&c->sb_write_mutex);
}

void bcache_write_super(struct cache_set *c)
{
        struct closure *cl = &c->sb_write;
        struct cache *ca;
        unsigned i;

        down(&c->sb_write_mutex);
        closure_init(cl, &c->cl);

        c->sb.seq++;

        for_each_cache(ca, c, i) {
                struct bio *bio = &ca->sb_bio;

                ca->sb.version          = BCACHE_SB_VERSION_CDEV_WITH_UUID;
                ca->sb.seq              = c->sb.seq;
                ca->sb.last_mount       = c->sb.last_mount;

                SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));

                bio_reset(bio);
                bio_set_dev(bio, ca->bdev);
                bio->bi_end_io  = write_super_endio;
                bio->bi_private = ca;

                closure_get(cl);
                __write_super(&ca->sb, bio);
        }

        closure_return_with_destructor(cl, bcache_write_super_unlock);
}

/* UUID io */

static void uuid_endio(struct bio *bio)
{
        struct closure *cl = bio->bi_private;
        struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

        cache_set_err_on(bio->bi_status, c, "accessing uuids");
        bch_bbio_free(bio, c);
        closure_put(cl);
}

static void uuid_io_unlock(struct closure *cl)
{
        struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

        up(&c->uuid_write_mutex);
}

static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
                    struct bkey *k, struct closure *parent)
{
        struct closure *cl = &c->uuid_write;
        struct uuid_entry *u;
        unsigned i;
        char buf[80];

        BUG_ON(!parent);
        down(&c->uuid_write_mutex);
        closure_init(cl, parent);

        for (i = 0; i < KEY_PTRS(k); i++) {
                struct bio *bio = bch_bbio_alloc(c);

                bio->bi_opf = REQ_SYNC | REQ_META | op_flags;
                bio->bi_iter.bi_size = KEY_SIZE(k) << 9;

                bio->bi_end_io  = uuid_endio;
                bio->bi_private = cl;
                bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
                bch_bio_map(bio, c->uuids);

                bch_submit_bbio(bio, c, k, i);

                if (op != REQ_OP_WRITE)
                        break;
        }

        bch_extent_to_text(buf, sizeof(buf), k);
        pr_debug("%s UUIDs at %s", op == REQ_OP_WRITE ? "wrote" : "read", buf);

        for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
                if (!bch_is_zero(u->uuid, 16))
                        pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u",
                                 u - c->uuids, u->uuid, u->label,
                                 u->first_reg, u->last_reg, u->invalidated);

        closure_return_with_destructor(cl, uuid_io_unlock);
}
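/*
 * Note the replication asymmetry in the loop above: a write is submitted
 * to every pointer in the key (all KEY_PTRS(k) replicas of the uuid
 * bucket), while a read breaks out after the first pointer and relies on
 * the endio handler to flag any error via cache_set_err_on().
 */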
static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
{
        struct bkey *k = &j->uuid_bucket;

        if (__bch_btree_ptr_invalid(c, k))
                return "bad uuid pointer";

        bkey_copy(&c->uuid_bucket, k);
        uuid_io(c, REQ_OP_READ, 0, k, cl);

        if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
                struct uuid_entry_v0    *u0 = (void *) c->uuids;
                struct uuid_entry       *u1 = (void *) c->uuids;
                int i;

                closure_sync(cl);

                /*
                 * Since the new uuid entry is bigger than the old, we have to
                 * convert starting at the highest memory address and work down
                 * in order to do it in place
                 */

                for (i = c->nr_uuids - 1;
                     i >= 0;
                     --i) {
                        memcpy(u1[i].uuid,      u0[i].uuid, 16);
                        memcpy(u1[i].label,     u0[i].label, 32);

                        u1[i].first_reg         = u0[i].first_reg;
                        u1[i].last_reg          = u0[i].last_reg;
                        u1[i].invalidated       = u0[i].invalidated;

                        u1[i].flags     = 0;
                        u1[i].sectors   = 0;
                }
        }

        return NULL;
}
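/*
 * Why the conversion loop runs backwards: entry i of the old packed array
 * starts at offset i * sizeof(uuid_entry_v0), entry i of the new array at
 * i * sizeof(uuid_entry), and the new stride is larger. Converting entry
 * 0 first would write past the start of old entry 1 and corrupt it;
 * working from the last entry down, each destination lies at or beyond
 * its source, so nothing still unread is overwritten. (Only the "new
 * entry is bigger" relation from the comment above is relied on here.)
 */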
static int __uuid_write(struct cache_set *c)
{
        BKEY_PADDED(key) k;
        struct closure cl;
        closure_init_stack(&cl);

        lockdep_assert_held(&bch_register_lock);

        if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
                return 1;

        SET_KEY_SIZE(&k.key, c->sb.bucket_size);
        uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
        closure_sync(&cl);

        bkey_copy(&c->uuid_bucket, &k.key);
        bkey_put(c, &k.key);
        return 0;
}

int bch_uuid_write(struct cache_set *c)
{
        int ret = __uuid_write(c);

        if (!ret)
                bch_journal_meta(c, NULL);

        return ret;
}

static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
{
        struct uuid_entry *u;

        for (u = c->uuids;
             u < c->uuids + c->nr_uuids; u++)
                if (!memcmp(u->uuid, uuid, 16))
                        return u;

        return NULL;
}

static struct uuid_entry *uuid_find_empty(struct cache_set *c)
{
        static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
        return uuid_find(c, zero_uuid);
}
/*
 * Bucket priorities/gens:
 *
 * For each bucket, we store on disk its
 *    8 bit gen
 *   16 bit priority
 *
 * See alloc.c for an explanation of the gen. The priority is used to implement
 * lru (and in the future other) cache replacement policies; for most purposes
 * it's just an opaque integer.
 *
 * The gens and the priorities don't have a whole lot to do with each other, and
 * it's actually the gens that must be written out at specific times - it's no
 * big deal if the priorities don't get written, if we lose them we just reuse
 * buckets in suboptimal order.
 *
 * On disk they're stored in a packed array, in as many buckets as are required
 * to fit them all. The buckets we use to store them form a list; the journal
 * header points to the first bucket, the first bucket points to the second
 * bucket, et cetera.
 *
 * This code is used by the allocation code; periodically (whenever it runs out
 * of buckets to allocate from) the allocation code will invalidate some
 * buckets, but it can't use those buckets until their new gens are safely on
 * disk.
 */
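/*
 * A sketch of the resulting on-disk layout, using the field names of
 * struct prio_set and struct bucket_disk that the code below reads and
 * writes:
 *
 *      journal header               prio bucket 0            prio bucket 1
 *      prio_bucket[dev] ----------> csum               +---> csum
 *                                   magic              |     magic
 *                                   seq                |     seq
 *                                   next_bucket -------+     next_bucket -> ...
 *                                   data[]: {prio,gen}       data[]: {prio,gen}
 *
 * Each data[] array holds prios_per_bucket(ca) entries; prio_read() below
 * walks this chain until all ca->sb.nbuckets buckets have been covered.
 */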
static void prio_endio(struct bio *bio)
{
        struct cache *ca = bio->bi_private;

        cache_set_err_on(bio->bi_status, ca->set, "accessing priorities");
        bch_bbio_free(bio, ca->set);
        closure_put(&ca->prio);
}

static void prio_io(struct cache *ca, uint64_t bucket, int op,
                    unsigned long op_flags)
{
        struct closure *cl = &ca->prio;
        struct bio *bio = bch_bbio_alloc(ca->set);

        closure_init_stack(cl);

        bio->bi_iter.bi_sector  = bucket * ca->sb.bucket_size;
        bio_set_dev(bio, ca->bdev);
        bio->bi_iter.bi_size    = bucket_bytes(ca);

        bio->bi_end_io  = prio_endio;
        bio->bi_private = ca;
        bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
        bch_bio_map(bio, ca->disk_buckets);

        closure_bio_submit(bio, &ca->prio);
        closure_sync(cl);
}

void bch_prio_write(struct cache *ca)
{
        int i;
        struct bucket *b;
        struct closure cl;

        closure_init_stack(&cl);

        lockdep_assert_held(&ca->set->bucket_lock);

        ca->disk_buckets->seq++;

        atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
                        &ca->meta_sectors_written);

        //pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
        //       fifo_used(&ca->free_inc), fifo_used(&ca->unused));

        for (i = prio_buckets(ca) - 1; i >= 0; --i) {
                long bucket;
                struct prio_set *p = ca->disk_buckets;
                struct bucket_disk *d = p->data;
                struct bucket_disk *end = d + prios_per_bucket(ca);

                for (b = ca->buckets + i * prios_per_bucket(ca);
                     b < ca->buckets + ca->sb.nbuckets && d < end;
                     b++, d++) {
                        d->prio = cpu_to_le16(b->prio);
                        d->gen = b->gen;
                }

                p->next_bucket  = ca->prio_buckets[i + 1];
                p->magic        = pset_magic(&ca->sb);
                p->csum         = bch_crc64(&p->magic, bucket_bytes(ca) - 8);

                bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);
                BUG_ON(bucket == -1);

                mutex_unlock(&ca->set->bucket_lock);
                prio_io(ca, bucket, REQ_OP_WRITE, 0);
                mutex_lock(&ca->set->bucket_lock);

                ca->prio_buckets[i] = bucket;
                atomic_dec_bug(&ca->buckets[bucket].pin);
        }

        mutex_unlock(&ca->set->bucket_lock);

        bch_journal_meta(ca->set, &cl);
        closure_sync(&cl);

        mutex_lock(&ca->set->bucket_lock);

        /*
         * Don't want the old priorities to get garbage collected until after we
         * finish writing the new ones, and they're journalled
         */
        for (i = 0; i < prio_buckets(ca); i++) {
                if (ca->prio_last_buckets[i])
                        __bch_bucket_free(ca,
                                &ca->buckets[ca->prio_last_buckets[i]]);

                ca->prio_last_buckets[i] = ca->prio_buckets[i];
        }
}
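/*
 * bch_prio_write() iterates from the last prio bucket down to the first
 * for the same reason uuid_read() converts in reverse: by the time bucket
 * i is assembled, bucket i + 1 has already been allocated, so
 * ca->prio_buckets[i + 1] holds the address its next_bucket field needs
 * (the final bucket's next pointer is simply never followed by
 * prio_read()). The bucket_lock is dropped around the synchronous
 * prio_io() call because the write may block.
 */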
static void prio_read(struct cache *ca, uint64_t bucket)
{
        struct prio_set *p = ca->disk_buckets;
        struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
        struct bucket *b;
        unsigned bucket_nr = 0;

        for (b = ca->buckets;
             b < ca->buckets + ca->sb.nbuckets;
             b++, d++) {
                if (d == end) {
                        ca->prio_buckets[bucket_nr] = bucket;
                        ca->prio_last_buckets[bucket_nr] = bucket;
                        bucket_nr++;

                        prio_io(ca, bucket, REQ_OP_READ, 0);

                        if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
                                pr_warn("bad csum reading priorities");

                        if (p->magic != pset_magic(&ca->sb))
                                pr_warn("bad magic reading priorities");

                        bucket = p->next_bucket;
                        d = p->data;
                }

                b->prio = le16_to_cpu(d->prio);
                b->gen = b->last_gc = d->gen;
        }
}

/* Bcache device */

static int open_dev(struct block_device *b, fmode_t mode)
{
        struct bcache_device *d = b->bd_disk->private_data;
        if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
                return -ENXIO;

        closure_get(&d->cl);
        return 0;
}

static void release_dev(struct gendisk *b, fmode_t mode)
{
        struct bcache_device *d = b->private_data;
        closure_put(&d->cl);
}

static int ioctl_dev(struct block_device *b, fmode_t mode,
                     unsigned int cmd, unsigned long arg)
{
        struct bcache_device *d = b->bd_disk->private_data;
        return d->ioctl(d, mode, cmd, arg);
}

static const struct block_device_operations bcache_ops = {
        .open           = open_dev,
        .release        = release_dev,
        .ioctl          = ioctl_dev,
        .owner          = THIS_MODULE,
};

void bcache_device_stop(struct bcache_device *d)
{
        if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
                closure_queue(&d->cl);
}

static void bcache_device_unlink(struct bcache_device *d)
{
        lockdep_assert_held(&bch_register_lock);

        if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
                unsigned i;
                struct cache *ca;

                sysfs_remove_link(&d->c->kobj, d->name);
                sysfs_remove_link(&d->kobj, "cache");

                for_each_cache(ca, d->c, i)
                        bd_unlink_disk_holder(ca->bdev, d->disk);
        }
}

static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
                               const char *name)
{
        unsigned i;
        struct cache *ca;

        for_each_cache(ca, d->c, i)
                bd_link_disk_holder(ca->bdev, d->disk);

        snprintf(d->name, BCACHEDEVNAME_SIZE,
                 "%s%u", name, d->id);

        WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
             sysfs_create_link(&c->kobj, &d->kobj, d->name),
             "Couldn't create device <-> cache set symlinks");

        clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
}

static void bcache_device_detach(struct bcache_device *d)
{
        lockdep_assert_held(&bch_register_lock);

        if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {
                struct uuid_entry *u = d->c->uuids + d->id;

                SET_UUID_FLASH_ONLY(u, 0);
                memcpy(u->uuid, invalid_uuid, 16);
                u->invalidated = cpu_to_le32(get_seconds());
                bch_uuid_write(d->c);
        }

        bcache_device_unlink(d);

        d->c->devices[d->id] = NULL;
        closure_put(&d->c->caching);
        d->c = NULL;
}

static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
                                 unsigned id)
{
        d->id = id;
        d->c = c;
        c->devices[id] = d;

        closure_get(&c->caching);
}

static void bcache_device_free(struct bcache_device *d)
{
        lockdep_assert_held(&bch_register_lock);

        pr_info("%s stopped", d->disk->disk_name);

        if (d->c)
                bcache_device_detach(d);
        if (d->disk && d->disk->flags & GENHD_FL_UP)
                del_gendisk(d->disk);
        if (d->disk && d->disk->queue)
                blk_cleanup_queue(d->disk->queue);
        if (d->disk) {
                ida_simple_remove(&bcache_minor, d->disk->first_minor);
                put_disk(d->disk);
        }

        if (d->bio_split)
                bioset_free(d->bio_split);
        kvfree(d->full_dirty_stripes);
        kvfree(d->stripe_sectors_dirty);

        closure_debug_destroy(&d->cl);
}

static int bcache_device_init(struct bcache_device *d, unsigned block_size,
                              sector_t sectors)
{
        struct request_queue *q;
        size_t n;
        int minor;

        if (!d->stripe_size)
                d->stripe_size = 1 << 31;

        d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size);

        if (!d->nr_stripes ||
            d->nr_stripes > INT_MAX ||
            d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) {
                pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)",
                        (unsigned)d->nr_stripes);
                return -ENOMEM;
        }

        n = d->nr_stripes * sizeof(atomic_t);
        d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL);
        if (!d->stripe_sectors_dirty)
                return -ENOMEM;

        n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
        d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL);
        if (!d->full_dirty_stripes)
                return -ENOMEM;

        minor = ida_simple_get(&bcache_minor, 0, MINORMASK + 1, GFP_KERNEL);
        if (minor < 0)
                return minor;

        minor *= BCACHE_MINORS;

        if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio),
                                           BIOSET_NEED_BVECS |
                                           BIOSET_NEED_RESCUER)) ||
            !(d->disk = alloc_disk(BCACHE_MINORS))) {
                ida_simple_remove(&bcache_minor, minor);
                return -ENOMEM;
        }

        set_capacity(d->disk, sectors);
        snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", minor);

        d->disk->major          = bcache_major;
        d->disk->first_minor    = minor;
        d->disk->fops           = &bcache_ops;
        d->disk->private_data   = d;

        q = blk_alloc_queue(GFP_KERNEL);
        if (!q)
                return -ENOMEM;

        blk_queue_make_request(q, NULL);
        d->disk->queue                  = q;
        q->queuedata                    = d;
        q->backing_dev_info->congested_data = d;
        q->limits.max_hw_sectors        = UINT_MAX;
        q->limits.max_sectors           = UINT_MAX;
        q->limits.max_segment_size      = UINT_MAX;
        q->limits.max_segments          = BIO_MAX_PAGES;
        blk_queue_max_discard_sectors(q, UINT_MAX);
        q->limits.discard_granularity   = 512;
        q->limits.io_min                = block_size;
        q->limits.logical_block_size    = block_size;
        q->limits.physical_block_size   = block_size;
        set_bit(QUEUE_FLAG_NONROT,      &d->disk->queue->queue_flags);
        clear_bit(QUEUE_FLAG_ADD_RANDOM, &d->disk->queue->queue_flags);
        set_bit(QUEUE_FLAG_DISCARD,     &d->disk->queue->queue_flags);

        blk_queue_write_cache(q, true, true);

        return 0;
}
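/*
 * Worked example for the stripe bookkeeping above, assuming a backing
 * device that advertises a 256 KiB optimal io size (stripe_size = 512
 * sectors, see cached_dev_init() below) and a 1 TiB capacity:
 *
 *      sectors    = 1 TiB / 512 B                  = 2147483648
 *      nr_stripes = DIV_ROUND_UP(2147483648, 512)  = 4194304
 *
 * which costs one atomic_t of dirty-sector count per stripe plus one bit
 * in full_dirty_stripes: roughly 16 MiB + 512 KiB here. Devices that
 * report no io_opt fall back to the 1 << 31 sector (1 TiB) stripe
 * default, giving a single stripe for most backing devices.
 */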
/* Cached device */

static void calc_cached_dev_sectors(struct cache_set *c)
{
        uint64_t sectors = 0;
        struct cached_dev *dc;

        list_for_each_entry(dc, &c->cached_devs, list)
                sectors += bdev_sectors(dc->bdev);

        c->cached_dev_sectors = sectors;
}

void bch_cached_dev_run(struct cached_dev *dc)
{
        struct bcache_device *d = &dc->disk;
        char buf[SB_LABEL_SIZE + 1];
        char *env[] = {
                "DRIVER=bcache",
                kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid),
                NULL,
                NULL,
        };

        memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
        buf[SB_LABEL_SIZE] = '\0';
        env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);

        if (atomic_xchg(&dc->running, 1)) {
                kfree(env[1]);
                kfree(env[2]);
                return;
        }

        if (!d->c &&
            BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
                struct closure cl;
                closure_init_stack(&cl);

                SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
                bch_write_bdev_super(dc, &cl);
                closure_sync(&cl);
        }

        add_disk(d->disk);
        bd_link_disk_holder(dc->bdev, dc->disk.disk);
        /* won't show up in the uevent file, use udevadm monitor -e instead
         * only class / kset properties are persistent */
        kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
        kfree(env[1]);
        kfree(env[2]);

        if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
            sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache"))
                pr_debug("error creating sysfs link");
}

static void cached_dev_detach_finish(struct work_struct *w)
{
        struct cached_dev *dc = container_of(w, struct cached_dev, detach);
        char buf[BDEVNAME_SIZE];
        struct closure cl;
        closure_init_stack(&cl);

        BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
        BUG_ON(atomic_read(&dc->count));

        mutex_lock(&bch_register_lock);

        cancel_delayed_work_sync(&dc->writeback_rate_update);
        if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
                kthread_stop(dc->writeback_thread);
                dc->writeback_thread = NULL;
        }

        memset(&dc->sb.set_uuid, 0, 16);
        SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);

        bch_write_bdev_super(dc, &cl);
        closure_sync(&cl);

        calc_cached_dev_sectors(dc->disk.c);
        bcache_device_detach(&dc->disk);
        list_move(&dc->list, &uncached_devices);

        clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
        clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);

        mutex_unlock(&bch_register_lock);

        pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));

        /* Drop ref we took in cached_dev_detach() */
        closure_put(&dc->disk.cl);
}

void bch_cached_dev_detach(struct cached_dev *dc)
{
        lockdep_assert_held(&bch_register_lock);

        if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
                return;

        if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
                return;

        /*
         * Block the device from being closed and freed until we're finished
         * detaching
         */
        closure_get(&dc->disk.cl);

        bch_writeback_queue(dc);
        cached_dev_put(dc);
}

int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
                          uint8_t *set_uuid)
{
        uint32_t rtime = cpu_to_le32(get_seconds());
        struct uuid_entry *u;
        char buf[BDEVNAME_SIZE];
        struct cached_dev *exist_dc, *t;

        bdevname(dc->bdev, buf);

        if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) ||
            (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)))
                return -ENOENT;

        if (dc->disk.c) {
                pr_err("Can't attach %s: already attached", buf);
                return -EINVAL;
        }

        if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
                pr_err("Can't attach %s: shutting down", buf);
                return -EINVAL;
        }

        if (dc->sb.block_size < c->sb.block_size) {
                /* Will die */
                pr_err("Couldn't attach %s: block size less than set's block size",
                       buf);
                return -EINVAL;
        }

        /* Check whether already attached */
        list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
                if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
                        pr_err("Tried to attach %s but duplicate UUID already attached",
                                buf);

                        return -EINVAL;
                }
        }

        u = uuid_find(c, dc->sb.uuid);

        if (u &&
            (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
             BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
                memcpy(u->uuid, invalid_uuid, 16);
                u->invalidated = cpu_to_le32(get_seconds());
                u = NULL;
        }

        if (!u) {
                if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
                        pr_err("Couldn't find uuid for %s in set", buf);
                        return -ENOENT;
                }

                u = uuid_find_empty(c);
                if (!u) {
                        pr_err("Not caching %s, no room for UUID", buf);
                        return -EINVAL;
                }
        }

        /* Deadlocks since we're called via sysfs...
        sysfs_remove_file(&dc->kobj, &sysfs_attach);
         */

        if (bch_is_zero(u->uuid, 16)) {
                struct closure cl;
                closure_init_stack(&cl);

                memcpy(u->uuid, dc->sb.uuid, 16);
                memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
                u->first_reg = u->last_reg = rtime;
                bch_uuid_write(c);

                memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16);
                SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);

                bch_write_bdev_super(dc, &cl);
                closure_sync(&cl);
        } else {
                u->last_reg = rtime;
                bch_uuid_write(c);
        }

        bcache_device_attach(&dc->disk, c, u - c->uuids);
        list_move(&dc->list, &c->cached_devs);
        calc_cached_dev_sectors(c);

        smp_wmb();
        /*
         * dc->c must be set before dc->count != 0 - paired with the mb in
         * cached_dev_get()
         */
        atomic_set(&dc->count, 1);

        /* Block writeback thread, but spawn it */
        down_write(&dc->writeback_lock);
        if (bch_cached_dev_writeback_start(dc)) {
                up_write(&dc->writeback_lock);
                return -ENOMEM;
        }

        if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
                atomic_set(&dc->has_dirty, 1);
                atomic_inc(&dc->count);
                bch_writeback_queue(dc);
        }

        bch_sectors_dirty_init(&dc->disk);

        bch_cached_dev_run(dc);
        bcache_device_link(&dc->disk, c, "bdev");

        /* Allow the writeback thread to proceed */
        up_write(&dc->writeback_lock);

        pr_info("Caching %s as %s on set %pU",
                bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
                dc->disk.c->sb.set_uuid);
        return 0;
}
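/*
 * bch_cached_dev_attach() is normally reached from the sysfs "attach"
 * attribute of a registered backing device; something like the following
 * (the device name is illustrative):
 *
 *      # echo <cache-set-uuid> > /sys/block/sdb/bcache/attach
 *
 * It is also called directly from register_bdev() below against every
 * known cache set, so a backing device whose superblock already names a
 * set re-attaches automatically on registration.
 */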
void bch_cached_dev_release(struct kobject *kobj)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);
        kfree(dc);
        module_put(THIS_MODULE);
}

static void cached_dev_free(struct closure *cl)
{
        struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);

        cancel_delayed_work_sync(&dc->writeback_rate_update);
        if (!IS_ERR_OR_NULL(dc->writeback_thread))
                kthread_stop(dc->writeback_thread);
        if (dc->writeback_write_wq)
                destroy_workqueue(dc->writeback_write_wq);

        mutex_lock(&bch_register_lock);

        if (atomic_read(&dc->running))
                bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
        bcache_device_free(&dc->disk);
        list_del(&dc->list);

        mutex_unlock(&bch_register_lock);

        if (!IS_ERR_OR_NULL(dc->bdev))
                blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

        wake_up(&unregister_wait);

        kobject_put(&dc->disk.kobj);
}

static void cached_dev_flush(struct closure *cl)
{
        struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
        struct bcache_device *d = &dc->disk;

        mutex_lock(&bch_register_lock);
        bcache_device_unlink(d);
        mutex_unlock(&bch_register_lock);

        bch_cache_accounting_destroy(&dc->accounting);
        kobject_del(&d->kobj);

        continue_at(cl, cached_dev_free, system_wq);
}

static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
{
        int ret;
        struct io *io;
        struct request_queue *q = bdev_get_queue(dc->bdev);

        __module_get(THIS_MODULE);
        INIT_LIST_HEAD(&dc->list);
        closure_init(&dc->disk.cl, NULL);
        set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
        kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
        INIT_WORK(&dc->detach, cached_dev_detach_finish);
        sema_init(&dc->sb_write_mutex, 1);
        INIT_LIST_HEAD(&dc->io_lru);
        spin_lock_init(&dc->io_lock);
        bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);

        dc->sequential_cutoff           = 4 << 20;

        for (io = dc->io; io < dc->io + RECENT_IO; io++) {
                list_add(&io->lru, &dc->io_lru);
                hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
        }

        dc->disk.stripe_size = q->limits.io_opt >> 9;

        if (dc->disk.stripe_size)
                dc->partial_stripes_expensive =
                        q->limits.raid_partial_stripes_expensive;

        ret = bcache_device_init(&dc->disk, block_size,
                         dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
        if (ret)
                return ret;

        set_capacity(dc->disk.disk,
                     dc->bdev->bd_part->nr_sects - dc->sb.data_offset);

        dc->disk.disk->queue->backing_dev_info->ra_pages =
                max(dc->disk.disk->queue->backing_dev_info->ra_pages,
                    q->backing_dev_info->ra_pages);

        bch_cached_dev_request_init(dc);
        bch_cached_dev_writeback_init(dc);
        return 0;
}

/* Cached device - bcache superblock */

static void register_bdev(struct cache_sb *sb, struct page *sb_page,
                                 struct block_device *bdev,
                                 struct cached_dev *dc)
{
        char name[BDEVNAME_SIZE];
        const char *err = "cannot allocate memory";
        struct cache_set *c;

        memcpy(&dc->sb, sb, sizeof(struct cache_sb));
        dc->bdev = bdev;
        dc->bdev->bd_holder = dc;

        bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1);
        dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
        get_page(sb_page);

        if (cached_dev_init(dc, sb->block_size << 9))
                goto err;

        err = "error creating kobject";
        if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
                        "bcache"))
                goto err;
        if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
                goto err;

        pr_info("registered backing device %s", bdevname(bdev, name));

        list_add(&dc->list, &uncached_devices);
        list_for_each_entry(c, &bch_cache_sets, list)
                bch_cached_dev_attach(dc, c, NULL);

        if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
            BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
                bch_cached_dev_run(dc);

        return;
err:
        pr_notice("error %s: %s", bdevname(bdev, name), err);
        bcache_device_stop(&dc->disk);
}

/* Flash only volumes */

void bch_flash_dev_release(struct kobject *kobj)
{
        struct bcache_device *d = container_of(kobj, struct bcache_device,
                                               kobj);
        kfree(d);
}

static void flash_dev_free(struct closure *cl)
{
        struct bcache_device *d = container_of(cl, struct bcache_device, cl);
        mutex_lock(&bch_register_lock);
        bcache_device_free(d);
        mutex_unlock(&bch_register_lock);
        kobject_put(&d->kobj);
}

static void flash_dev_flush(struct closure *cl)
{
        struct bcache_device *d = container_of(cl, struct bcache_device, cl);

        mutex_lock(&bch_register_lock);
        bcache_device_unlink(d);
        mutex_unlock(&bch_register_lock);
        kobject_del(&d->kobj);
        continue_at(cl, flash_dev_free, system_wq);
}

static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
{
        struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
                                          GFP_KERNEL);
        if (!d)
                return -ENOMEM;

        closure_init(&d->cl, NULL);
        set_closure_fn(&d->cl, flash_dev_flush, system_wq);

        kobject_init(&d->kobj, &bch_flash_dev_ktype);

        if (bcache_device_init(d, block_bytes(c), u->sectors))
                goto err;

        bcache_device_attach(d, c, u - c->uuids);
        bch_sectors_dirty_init(d);
        bch_flash_dev_request_init(d);
        add_disk(d->disk);

        if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))
                goto err;

        bcache_device_link(d, c, "volume");

        return 0;
err:
        kobject_put(&d->kobj);
        return -ENOMEM;
}

static int flash_devs_run(struct cache_set *c)
{
        int ret = 0;
        struct uuid_entry *u;

        for (u = c->uuids;
             u < c->uuids + c->nr_uuids && !ret;
             u++)
                if (UUID_FLASH_ONLY(u))
                        ret = flash_dev_run(c, u);

        return ret;
}

int bch_flash_dev_create(struct cache_set *c, uint64_t size)
{
        struct uuid_entry *u;

        if (test_bit(CACHE_SET_STOPPING, &c->flags))
                return -EINTR;

        if (!test_bit(CACHE_SET_RUNNING, &c->flags))
                return -EPERM;

        u = uuid_find_empty(c);
        if (!u) {
                pr_err("Can't create volume, no room for UUID");
                return -EINVAL;
        }

        get_random_bytes(u->uuid, 16);
        memset(u->label, 0, 32);
        u->first_reg = u->last_reg = cpu_to_le32(get_seconds());

        SET_UUID_FLASH_ONLY(u, 1);
        u->sectors = size >> 9;

        bch_uuid_write(c);

        return flash_dev_run(c, u);
}
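/*
 * Flash-only volumes are created at runtime through the cache set's sysfs
 * directory; a sketch of the usual invocation (size parsing and path are
 * as this editor understands the sysfs code, not stated in this file):
 *
 *      # echo 128M > /sys/fs/bcache/<cache-set-uuid>/flash_vol_create
 *
 * which reaches bch_flash_dev_create() via the sysfs store hook,
 * allocates a uuid slot, flags it UUID_FLASH_ONLY and spins up a bcacheN
 * device backed entirely by the cache.
 */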
/* Cache set */

__printf(2, 3)
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
{
        va_list args;

        if (c->on_error != ON_ERROR_PANIC &&
            test_bit(CACHE_SET_STOPPING, &c->flags))
                return false;

        /* XXX: we can be called from atomic context
        acquire_console_sem();
        */

        printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid);

        va_start(args, fmt);
        vprintk(fmt, args);
        va_end(args);

        printk(", disabling caching\n");

        if (c->on_error == ON_ERROR_PANIC)
                panic("panic forced after error\n");

        bch_cache_set_unregister(c);
        return true;
}

void bch_cache_set_release(struct kobject *kobj)
{
        struct cache_set *c = container_of(kobj, struct cache_set, kobj);
        kfree(c);
        module_put(THIS_MODULE);
}

static void cache_set_free(struct closure *cl)
{
        struct cache_set *c = container_of(cl, struct cache_set, cl);
        struct cache *ca;
        unsigned i;

        if (!IS_ERR_OR_NULL(c->debug))
                debugfs_remove(c->debug);

        bch_open_buckets_free(c);
        bch_btree_cache_free(c);
        bch_journal_free(c);

        mutex_lock(&bch_register_lock);
        for_each_cache(ca, c, i)
                if (ca) {
                        ca->set = NULL;
                        c->cache[ca->sb.nr_this_dev] = NULL;
                        kobject_put(&ca->kobj);
                }

        bch_bset_sort_state_free(&c->sort);
        free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));

        if (c->moving_gc_wq)
                destroy_workqueue(c->moving_gc_wq);
        if (c->bio_split)
                bioset_free(c->bio_split);
        if (c->fill_iter)
                mempool_destroy(c->fill_iter);
        if (c->bio_meta)
                mempool_destroy(c->bio_meta);
        if (c->search)
                mempool_destroy(c->search);
        kfree(c->devices);

        list_del(&c->list);
        mutex_unlock(&bch_register_lock);

        pr_info("Cache set %pU unregistered", c->sb.set_uuid);
        wake_up(&unregister_wait);

        closure_debug_destroy(&c->cl);
        kobject_put(&c->kobj);
}

static void cache_set_flush(struct closure *cl)
{
        struct cache_set *c = container_of(cl, struct cache_set, caching);
        struct cache *ca;
        struct btree *b;
        unsigned i;

        bch_cache_accounting_destroy(&c->accounting);

        kobject_put(&c->internal);
        kobject_del(&c->kobj);

        if (!IS_ERR_OR_NULL(c->gc_thread))
                kthread_stop(c->gc_thread);

        if (!IS_ERR_OR_NULL(c->root))
                list_add(&c->root->list, &c->btree_cache);

        /* Should skip this if we're unregistering because of an error */
        list_for_each_entry(b, &c->btree_cache, list) {
                mutex_lock(&b->write_lock);
                if (btree_node_dirty(b))
                        __bch_btree_node_write(b, NULL);
                mutex_unlock(&b->write_lock);
        }

        for_each_cache(ca, c, i)
                if (ca->alloc_thread)
                        kthread_stop(ca->alloc_thread);

        if (c->journal.cur) {
                cancel_delayed_work_sync(&c->journal.work);
                /* flush last journal entry if needed */
                c->journal.work.work.func(&c->journal.work.work);
        }

        closure_return(cl);
}

static void __cache_set_unregister(struct closure *cl)
{
        struct cache_set *c = container_of(cl, struct cache_set, caching);
        struct cached_dev *dc;
        size_t i;

        mutex_lock(&bch_register_lock);

        for (i = 0; i < c->nr_uuids; i++)
                if (c->devices[i]) {
                        if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
                            test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
                                dc = container_of(c->devices[i],
                                                  struct cached_dev, disk);
                                bch_cached_dev_detach(dc);
                        } else {
                                bcache_device_stop(c->devices[i]);
                        }
                }

        mutex_unlock(&bch_register_lock);

        continue_at(cl, cache_set_flush, system_wq);
}

void bch_cache_set_stop(struct cache_set *c)
{
        if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
                closure_queue(&c->caching);
}

void bch_cache_set_unregister(struct cache_set *c)
{
        set_bit(CACHE_SET_UNREGISTERING, &c->flags);
        bch_cache_set_stop(c);
}
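/*
 * Teardown fans out through the two closures set up in
 * bch_cache_set_alloc() below: stopping queues c->caching, whose function
 * __cache_set_unregister() detaches or stops every attached device, then
 * continues at cache_set_flush() to write out dirty btree nodes, stop the
 * gc and allocator threads and flush the journal; once the last ref on
 * c->cl goes away, cache_set_free() returns the memory.
 * bch_cache_set_unregister() is the same path with CACHE_SET_UNREGISTERING
 * set, so backing devices detach instead of merely stopping.
 */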
#define alloc_bucket_pages(gfp, c)                      \
        ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(bucket_pages(c))))

struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
{
        int iter_size;
        struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
        if (!c)
                return NULL;

        __module_get(THIS_MODULE);
        closure_init(&c->cl, NULL);
        set_closure_fn(&c->cl, cache_set_free, system_wq);

        closure_init(&c->caching, &c->cl);
        set_closure_fn(&c->caching, __cache_set_unregister, system_wq);

        /* Maybe create continue_at_noreturn() and use it here? */
        closure_set_stopped(&c->cl);
        closure_put(&c->cl);

        kobject_init(&c->kobj, &bch_cache_set_ktype);
        kobject_init(&c->internal, &bch_cache_set_internal_ktype);

        bch_cache_accounting_init(&c->accounting, &c->cl);

        memcpy(c->sb.set_uuid, sb->set_uuid, 16);
        c->sb.block_size        = sb->block_size;
        c->sb.bucket_size       = sb->bucket_size;
        c->sb.nr_in_set         = sb->nr_in_set;
        c->sb.last_mount        = sb->last_mount;
        c->bucket_bits          = ilog2(sb->bucket_size);
        c->block_bits           = ilog2(sb->block_size);
        c->nr_uuids             = bucket_bytes(c) / sizeof(struct uuid_entry);

        c->btree_pages          = bucket_pages(c);
        if (c->btree_pages > BTREE_MAX_PAGES)
                c->btree_pages = max_t(int, c->btree_pages / 4,
                                       BTREE_MAX_PAGES);

        sema_init(&c->sb_write_mutex, 1);
        mutex_init(&c->bucket_lock);
        init_waitqueue_head(&c->btree_cache_wait);
        spin_lock_init(&c->btree_cannibalize_lock);
        init_waitqueue_head(&c->bucket_wait);
        init_waitqueue_head(&c->gc_wait);
        sema_init(&c->uuid_write_mutex, 1);

        spin_lock_init(&c->btree_gc_time.lock);
        spin_lock_init(&c->btree_split_time.lock);
        spin_lock_init(&c->btree_read_time.lock);

        bch_moving_init_cache_set(c);

        INIT_LIST_HEAD(&c->list);
        INIT_LIST_HEAD(&c->cached_devs);
        INIT_LIST_HEAD(&c->btree_cache);
        INIT_LIST_HEAD(&c->btree_cache_freeable);
        INIT_LIST_HEAD(&c->btree_cache_freed);
        INIT_LIST_HEAD(&c->data_buckets);

        c->search = mempool_create_slab_pool(32, bch_search_cache);
        if (!c->search)
                goto err;

        iter_size = (sb->bucket_size / sb->block_size + 1) *
                sizeof(struct btree_iter_set);

        if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) ||
            !(c->bio_meta = mempool_create_kmalloc_pool(2,
                                sizeof(struct bbio) + sizeof(struct bio_vec) *
                                bucket_pages(c))) ||
            !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
            !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio),
                                           BIOSET_NEED_BVECS |
                                           BIOSET_NEED_RESCUER)) ||
            !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
            !(c->moving_gc_wq = alloc_workqueue("bcache_gc",
                                                WQ_MEM_RECLAIM, 0)) ||
            bch_journal_alloc(c) ||
            bch_btree_cache_alloc(c) ||
            bch_open_buckets_alloc(c) ||
            bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
                goto err;

        c->congested_read_threshold_us  = 2000;
        c->congested_write_threshold_us = 20000;
        c->error_limit  = 8 << IO_ERROR_SHIFT;

        return c;
err:
        bch_cache_set_unregister(c);
        return NULL;
}
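/*
 * Sizing example for the derived values above, assuming a 512 KiB bucket
 * (bucket_size = 1024 sectors), 4 KiB pages, and the 128-byte on-disk
 * uuid_entry (the entry size is an assumption about bcache.h, not stated
 * in this file):
 *
 *      bucket_bytes(c) = 1024 << 9          = 524288
 *      c->nr_uuids     = 524288 / 128       = 4096
 *      bucket_pages(c) = 524288 / 4096      = 128
 *      c->btree_pages  = max(128 / 4, BTREE_MAX_PAGES = 64) = 64
 *
 * so one bucket's worth of pages backs the uuid array, and btree nodes
 * are capped at BTREE_MAX_PAGES (256 KiB of 4 KiB pages).
 */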
1564
1565 static int run_cache_set(struct cache_set *c)
1566 {
1567         const char *err = "cannot allocate memory";
1568         struct cached_dev *dc, *t;
1569         struct cache *ca;
1570         struct closure cl;
1571         unsigned i;
1572
1573         closure_init_stack(&cl);
1574
1575         for_each_cache(ca, c, i)
1576                 c->nbuckets += ca->sb.nbuckets;
1577         set_gc_sectors(c);
1578
1579         if (CACHE_SYNC(&c->sb)) {
1580                 LIST_HEAD(journal);
1581                 struct bkey *k;
1582                 struct jset *j;
1583
1584                 err = "cannot allocate memory for journal";
1585                 if (bch_journal_read(c, &journal))
1586                         goto err;
1587
1588                 pr_debug("btree_journal_read() done");
1589
1590                 err = "no journal entries found";
1591                 if (list_empty(&journal))
1592                         goto err;
1593
1594                 j = &list_entry(journal.prev, struct journal_replay, list)->j;
1595
1596                 err = "IO error reading priorities";
1597                 for_each_cache(ca, c, i)
1598                         prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]);
1599
1600                 /*
1601                  * If prio_read() fails it'll call cache_set_error and we'll
1602                  * tear everything down right away, but if we perhaps checked
1603                  * sooner we could avoid journal replay.
1604                  */

		k = &j->btree_root;

		err = "bad btree root";
		if (__bch_btree_ptr_invalid(c, k))
			goto err;

		err = "error reading btree root";
		c->root = bch_btree_node_get(c, NULL, k, j->btree_level, true, NULL);
		if (IS_ERR_OR_NULL(c->root))
			goto err;

		list_del_init(&c->root->list);
		rw_unlock(true, c->root);

		err = uuid_read(c, j, &cl);
		if (err)
			goto err;

		err = "error in recovery";
		if (bch_btree_check(c))
			goto err;

		bch_journal_mark(c, &journal);
		bch_initial_gc_finish(c);
		pr_debug("btree_check() done");

		/*
		 * bcache_journal_next() can't happen sooner, or
		 * btree_gc_finish() will give spurious errors about last_gc >
		 * gc_gen - this is a hack but oh well.
		 */
		bch_journal_next(&c->journal);

		err = "error starting allocator thread";
		for_each_cache(ca, c, i)
			if (bch_cache_allocator_start(ca))
				goto err;

		/*
		 * First place it's safe to allocate: btree_check() and
		 * btree_gc_finish() have to run before we have buckets to
		 * allocate, and bch_bucket_alloc_set() might cause a journal
		 * entry to be written so bcache_journal_next() has to be called
		 * first.
		 *
		 * If the uuids were in the old format we have to rewrite them
		 * before the next journal entry is written:
		 */
		if (j->version < BCACHE_JSET_VERSION_UUID)
			__uuid_write(c);

		err = "bcache: replay journal failed";
		if (bch_journal_replay(c, &journal))
			goto err;
	} else {
		pr_notice("invalidating existing data");

		for_each_cache(ca, c, i) {
			unsigned j;

			ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
					      2, SB_JOURNAL_BUCKETS);

			for (j = 0; j < ca->sb.keys; j++)
				ca->sb.d[j] = ca->sb.first_bucket + j;
		}

		bch_initial_gc_finish(c);

		err = "error starting allocator thread";
		for_each_cache(ca, c, i)
			if (bch_cache_allocator_start(ca))
				goto err;

		mutex_lock(&c->bucket_lock);
		for_each_cache(ca, c, i)
			bch_prio_write(ca);
		mutex_unlock(&c->bucket_lock);

		err = "cannot allocate new UUID bucket";
		if (__uuid_write(c))
			goto err;

		err = "cannot allocate new btree root";
		c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL);
		if (IS_ERR_OR_NULL(c->root))
			goto err;

		mutex_lock(&c->root->write_lock);
		bkey_copy_key(&c->root->key, &MAX_KEY);
		bch_btree_node_write(c->root, &cl);
		mutex_unlock(&c->root->write_lock);

		bch_btree_set_root(c->root);
		rw_unlock(true, c->root);

		/*
		 * We don't want to write the first journal entry until
		 * everything is set up - fortunately journal entries won't be
		 * written until the SET_CACHE_SYNC() here:
		 */
		SET_CACHE_SYNC(&c->sb, true);

		bch_journal_next(&c->journal);
		bch_journal_meta(c, &cl);
	}

	err = "error starting gc thread";
	if (bch_gc_thread_start(c))
		goto err;

	closure_sync(&cl);
	c->sb.last_mount = get_seconds();
	bcache_write_super(c);

	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		bch_cached_dev_attach(dc, c, NULL);

	flash_devs_run(c);

	set_bit(CACHE_SET_RUNNING, &c->flags);
	return 0;
err:
	closure_sync(&cl);
	/* XXX: test this, it's broken */
	bch_cache_set_error(c, "%s", err);

	return -EIO;
}

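/*
 * A cache may only join a set whose superblock agrees on the basic
 * geometry: block size, bucket size and number of caches in the set.
 */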
static bool can_attach_cache(struct cache *ca, struct cache_set *c)
{
	return ca->sb.block_size	== c->sb.block_size &&
		ca->sb.bucket_size	== c->sb.bucket_size &&
		ca->sb.nr_in_set	== c->sb.nr_in_set;
}

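/*
 * Find the cache set this cache belongs to by set_uuid, allocating a new
 * one (with its sysfs kobjects) if none exists yet; link the cache into
 * the set and, once all members have arrived, run the set. Returns NULL
 * on success or an error string for the caller to report.
 */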
static const char *register_cache_set(struct cache *ca)
{
	char buf[12];
	const char *err = "cannot allocate memory";
	struct cache_set *c;

	list_for_each_entry(c, &bch_cache_sets, list)
		if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
			if (c->cache[ca->sb.nr_this_dev])
				return "duplicate cache set member";

			if (!can_attach_cache(ca, c))
				return "cache sb does not match set";

			if (!CACHE_SYNC(&ca->sb))
				SET_CACHE_SYNC(&c->sb, false);

			goto found;
		}

	c = bch_cache_set_alloc(&ca->sb);
	if (!c)
		return err;

	err = "error creating kobject";
	if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) ||
	    kobject_add(&c->internal, &c->kobj, "internal"))
		goto err;

	if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj))
		goto err;

	bch_debug_init_cache_set(c);

	list_add(&c->list, &bch_cache_sets);
found:
	sprintf(buf, "cache%i", ca->sb.nr_this_dev);
	if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
	    sysfs_create_link(&c->kobj, &ca->kobj, buf))
		goto err;

	/*
	 * A special case is when both ca->sb.seq and c->sb.seq are 0; this
	 * happens for a freshly created cache device whose superblock has
	 * never been flushed. In that case c->sb.version and the other
	 * members must be updated too, otherwise the cache set ends up
	 * with a wrong superblock version.
	 */
	if (ca->sb.seq > c->sb.seq || c->sb.seq == 0) {
		c->sb.version		= ca->sb.version;
		memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
		c->sb.flags		= ca->sb.flags;
		c->sb.seq		= ca->sb.seq;
		pr_debug("set version = %llu", c->sb.version);
	}

	kobject_get(&ca->kobj);
	ca->set = c;
	ca->set->cache[ca->sb.nr_this_dev] = ca;
	c->cache_by_alloc[c->caches_loaded++] = ca;

	if (c->caches_loaded == c->sb.nr_in_set) {
		err = "failed to run cache set";
		if (run_cache_set(c) < 0)
			goto err;
	}

	return NULL;
err:
	bch_cache_set_unregister(c);
	return err;
}

/* Cache device */

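/*
 * kobject release callback for a struct cache: detach it from its set,
 * free everything cache_alloc() set up, close the block device and drop
 * the module reference taken at allocation time.
 */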
void bch_cache_release(struct kobject *kobj)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	unsigned i;

	if (ca->set) {
		BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
		ca->set->cache[ca->sb.nr_this_dev] = NULL;
	}

	free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
	kfree(ca->prio_buckets);
	vfree(ca->buckets);

	free_heap(&ca->heap);
	free_fifo(&ca->free_inc);

	for (i = 0; i < RESERVE_NR; i++)
		free_fifo(&ca->free[i]);

	if (ca->sb_bio.bi_inline_vecs[0].bv_page)
		put_page(ca->sb_bio.bi_io_vec[0].bv_page);

	if (!IS_ERR_OR_NULL(ca->bdev))
		blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

	kfree(ca);
	module_put(THIS_MODULE);
}

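/*
 * Allocate the in-memory state for a cache device: the per-reserve free
 * bucket fifos, the bucket array and heap, and the prio/disk bucket
 * buffers. Returns 0 or -ENOMEM; freeing of partial allocations is left
 * to bch_cache_release().
 */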
static int cache_alloc(struct cache *ca)
{
	size_t free;
	size_t btree_buckets;
	struct bucket *b;

	__module_get(THIS_MODULE);
	kobject_init(&ca->kobj, &bch_cache_ktype);

	bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8);

	/*
	 * When ca->sb.njournal_buckets is non-zero a journal exists, and
	 * btree nodes may split during bch_journal_replay(), so buckets of
	 * the RESERVE_BTREE type are needed. In the worst case every
	 * journal bucket holds valid journal entries and all of their keys
	 * must be replayed, so reserve as many RESERVE_BTREE buckets as
	 * there are journal buckets.
	 */
	btree_buckets = ca->sb.njournal_buckets ?: 8;
	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;

	if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets, GFP_KERNEL) ||
	    !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
	    !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
	    !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
	    !init_fifo(&ca->free_inc,	free << 2, GFP_KERNEL) ||
	    !init_heap(&ca->heap,	free << 3, GFP_KERNEL) ||
	    !(ca->buckets	= vzalloc(sizeof(struct bucket) *
					  ca->sb.nbuckets)) ||
	    !(ca->prio_buckets	= kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
					  2, GFP_KERNEL)) ||
	    !(ca->disk_buckets	= alloc_bucket_pages(GFP_KERNEL, ca)))
		return -ENOMEM;

	ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);

	for_each_bucket(b, ca)
		atomic_set(&b->pin, 0);

	return 0;
}

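/*
 * Take ownership of an opened cache device: copy in the superblock read
 * by the caller, set up the superblock bio, allocate the in-memory state
 * via cache_alloc() and hand the cache to register_cache_set(). Errors
 * are logged here and returned to register_bcache().
 */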
static int register_cache(struct cache_sb *sb, struct page *sb_page,
				struct block_device *bdev, struct cache *ca)
{
	char name[BDEVNAME_SIZE];
	const char *err = NULL; /* must be set for any error case */
	int ret = 0;

	bdevname(bdev, name);

	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
	ca->bdev = bdev;
	ca->bdev->bd_holder = ca;

	bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1);
	ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
	get_page(sb_page);

	if (blk_queue_discard(bdev_get_queue(bdev)))
		ca->discard = CACHE_DISCARD(&ca->sb);

	ret = cache_alloc(ca);
	if (ret != 0) {
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
		if (ret == -ENOMEM)
			err = "cache_alloc(): -ENOMEM";
		else
			err = "cache_alloc(): unknown error";
		goto err;
	}

	if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) {
		err = "error calling kobject_add";
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&bch_register_lock);
	err = register_cache_set(ca);
	mutex_unlock(&bch_register_lock);

	if (err) {
		ret = -ENODEV;
		goto out;
	}

	pr_info("registered cache device %s", name);

out:
	kobject_put(&ca->kobj);

err:
	if (err)
		pr_notice("error %s: %s", name, err);

	return ret;
}

/* Global interfaces/init */

static ssize_t register_bcache(struct kobject *, struct kobj_attribute *,
			       const char *, size_t);

kobj_attribute_write(register,		register_bcache);
kobj_attribute_write(register_quiet,	register_bcache);

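/*
 * Helpers for the -EBUSY path in register_bcache(): report whether a
 * block device is already open as a backing device or as a cache, so
 * "device already registered" can be distinguished from "device busy".
 */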
static bool bch_is_open_backing(struct block_device *bdev)
{
	struct cache_set *c, *tc;
	struct cached_dev *dc, *t;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
			if (dc->bdev == bdev)
				return true;
	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		if (dc->bdev == bdev)
			return true;
	return false;
}

static bool bch_is_open_cache(struct block_device *bdev)
{
	struct cache_set *c, *tc;
	struct cache *ca;
	unsigned i;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		for_each_cache(ca, c, i)
			if (ca->bdev == bdev)
				return true;
	return false;
}

static bool bch_is_open(struct block_device *bdev)
{
	return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
}

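/*
 * Write handler for /sys/fs/bcache/register (and register_quiet): the
 * buffer is the path of a device to register, e.g.
 *
 *	echo /dev/sdc > /sys/fs/bcache/register
 *
 * The device's superblock decides whether it is registered as a backing
 * device or as a cache. register_quiet only suppresses the error when
 * the device is busy or already registered.
 */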
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
			       const char *buffer, size_t size)
{
	ssize_t ret = size;
	const char *err = "cannot allocate memory";
	char *path = NULL;
	struct cache_sb *sb = NULL;
	struct block_device *bdev = NULL;
	struct page *sb_page = NULL;

	if (!try_module_get(THIS_MODULE))
		return -EBUSY;

	if (!(path = kstrndup(buffer, size, GFP_KERNEL)) ||
	    !(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL)))
		goto err;

	err = "failed to open device";
	bdev = blkdev_get_by_path(strim(path),
				  FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				  sb);
	if (IS_ERR(bdev)) {
		if (bdev == ERR_PTR(-EBUSY)) {
			bdev = lookup_bdev(strim(path));
			mutex_lock(&bch_register_lock);
			if (!IS_ERR(bdev) && bch_is_open(bdev))
				err = "device already registered";
			else
				err = "device busy";
			mutex_unlock(&bch_register_lock);
			if (!IS_ERR(bdev))
				bdput(bdev);
			if (attr == &ksysfs_register_quiet)
				goto out;
		}
		goto err;
	}

	err = "failed to set blocksize";
	if (set_blocksize(bdev, 4096))
		goto err_close;

	err = read_super(sb, bdev, &sb_page);
	if (err)
		goto err_close;

	err = "failed to register device";
	if (SB_IS_BDEV(sb)) {
		struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
		if (!dc)
			goto err_close;

		mutex_lock(&bch_register_lock);
		register_bdev(sb, sb_page, bdev, dc);
		mutex_unlock(&bch_register_lock);
	} else {
		struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
		if (!ca)
			goto err_close;

		if (register_cache(sb, sb_page, bdev, ca) != 0)
			goto err;
	}
out:
	if (sb_page)
		put_page(sb_page);
	kfree(sb);
	kfree(path);
	module_put(THIS_MODULE);
	return ret;

err_close:
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
err:
	pr_info("error %s: %s", path, err);
	ret = -EINVAL;
	goto out;
}

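/*
 * Reboot notifier: on shutdown, halt or power-off, stop every cache set
 * and backing device, then wait up to two seconds for them to finish
 * closing before letting the reboot proceed.
 */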
static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
{
	if (code == SYS_DOWN ||
	    code == SYS_HALT ||
	    code == SYS_POWER_OFF) {
		DEFINE_WAIT(wait);
		unsigned long start = jiffies;
		bool stopped = false;

		struct cache_set *c, *tc;
		struct cached_dev *dc, *tdc;

		mutex_lock(&bch_register_lock);

		if (list_empty(&bch_cache_sets) &&
		    list_empty(&uncached_devices))
			goto out;

		pr_info("Stopping all devices:");

		list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
			bch_cache_set_stop(c);

		list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
			bcache_device_stop(&dc->disk);

		/* What's a condition variable? */
		while (1) {
			long timeout = start + 2 * HZ - jiffies;

			stopped = list_empty(&bch_cache_sets) &&
				list_empty(&uncached_devices);

			if (timeout < 0 || stopped)
				break;

			prepare_to_wait(&unregister_wait, &wait,
					TASK_UNINTERRUPTIBLE);

			mutex_unlock(&bch_register_lock);
			schedule_timeout(timeout);
			mutex_lock(&bch_register_lock);
		}

		finish_wait(&unregister_wait, &wait);

		if (stopped)
			pr_info("All devices stopped");
		else
			pr_notice("Timeout waiting for devices to be closed");
out:
		mutex_unlock(&bch_register_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block reboot = {
	.notifier_call	= bcache_reboot,
	.priority	= INT_MAX, /* before any real devices */
};

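/*
 * Module teardown, also used by bcache_init() to unwind a partial
 * setup: each step checks whether its resource was actually created.
 */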
static void bcache_exit(void)
{
	bch_debug_exit();
	bch_request_exit();
	if (bcache_kobj)
		kobject_put(bcache_kobj);
	if (bcache_wq)
		destroy_workqueue(bcache_wq);
	if (bcache_major)
		unregister_blkdev(bcache_major, "bcache");
	unregister_reboot_notifier(&reboot);
	mutex_destroy(&bch_register_lock);
}

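/*
 * Module init: register the reboot notifier and block major, then set up
 * the workqueue, the /sys/fs/bcache kobject with its register files, and
 * the request and debug infrastructure. Any failure after the major is
 * registered unwinds through bcache_exit().
 */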
static int __init bcache_init(void)
{
	static const struct attribute *files[] = {
		&ksysfs_register.attr,
		&ksysfs_register_quiet.attr,
		NULL
	};

	mutex_init(&bch_register_lock);
	init_waitqueue_head(&unregister_wait);
	register_reboot_notifier(&reboot);
	closure_debug_init();

	bcache_major = register_blkdev(0, "bcache");
	if (bcache_major < 0) {
		unregister_reboot_notifier(&reboot);
		mutex_destroy(&bch_register_lock);
		return bcache_major;
	}

	if (!(bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0)) ||
	    !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
	    bch_request_init() ||
	    bch_debug_init(bcache_kobj) ||
	    sysfs_create_files(bcache_kobj, files))
		goto err;

	return 0;
err:
	bcache_exit();
	return -ENOMEM;
}

module_exit(bcache_exit);
module_init(bcache_init);