drivers/md/bcache/journal.c (GNU Linux-libre 4.4.289-gnu1)
/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <trace/events/bcache.h>

/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */

static void journal_read_endio(struct bio *bio)
{
        struct closure *cl = bio->bi_private;
        closure_put(cl);
}

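/*
 * Read and parse one journal bucket: walk the jsets it contains, validate
 * magic, size and checksum, and splice each one into @list, which is kept
 * sorted by sequence number. Returns 1 if any entries were added, 0 if the
 * bucket held nothing usable, or -ENOMEM on allocation failure.
 */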
static int journal_read_bucket(struct cache *ca, struct list_head *list,
                               unsigned bucket_index)
{
        struct journal_device *ja = &ca->journal;
        struct bio *bio = &ja->bio;

        struct journal_replay *i;
        struct jset *j, *data = ca->set->journal.w[0].data;
        struct closure cl;
        unsigned len, left, offset = 0;
        int ret = 0;
        sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

        closure_init_stack(&cl);

        pr_debug("reading %u", bucket_index);

        while (offset < ca->sb.bucket_size) {
reread:         left = ca->sb.bucket_size - offset;
                len = min_t(unsigned, left, PAGE_SECTORS << JSET_BITS);

                bio_reset(bio);
                bio->bi_iter.bi_sector  = bucket + offset;
                bio->bi_bdev    = ca->bdev;
                bio->bi_rw      = READ;
                bio->bi_iter.bi_size    = len << 9;

                bio->bi_end_io  = journal_read_endio;
                bio->bi_private = &cl;
                bch_bio_map(bio, data);

                closure_bio_submit(bio, &cl);
                closure_sync(&cl);

                /* This function could be simpler now since we no longer write
                 * journal entries that overlap bucket boundaries; this means
                 * the start of a bucket will always have a valid journal entry
                 * if it has any journal entries at all.
                 */

                j = data;
                while (len) {
                        struct list_head *where;
                        size_t blocks, bytes = set_bytes(j);

                        if (j->magic != jset_magic(&ca->sb)) {
                                pr_debug("%u: bad magic", bucket_index);
                                return ret;
                        }

                        if (bytes > left << 9 ||
                            bytes > PAGE_SIZE << JSET_BITS) {
                                pr_info("%u: too big, %zu bytes, offset %u",
                                        bucket_index, bytes, offset);
                                return ret;
                        }

                        if (bytes > len << 9)
                                goto reread;

                        if (j->csum != csum_set(j)) {
                                pr_info("%u: bad csum, %zu bytes, offset %u",
                                        bucket_index, bytes, offset);
                                return ret;
                        }

                        blocks = set_blocks(j, block_bytes(ca->set));

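                        /*
                         * Prune replay entries this jset says are no longer
                         * needed (seq older than j->last_seq), then splice
                         * the jset into the list, keeping it sorted by
                         * sequence number and skipping sequence numbers that
                         * are already present.
                         */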
                        while (!list_empty(list)) {
                                i = list_first_entry(list,
                                        struct journal_replay, list);
                                if (i->j.seq >= j->last_seq)
                                        break;
                                list_del(&i->list);
                                kfree(i);
                        }

                        list_for_each_entry_reverse(i, list, list) {
                                if (j->seq == i->j.seq)
                                        goto next_set;

                                if (j->seq < i->j.last_seq)
                                        goto next_set;

                                if (j->seq > i->j.seq) {
                                        where = &i->list;
                                        goto add;
                                }
                        }

                        where = list;
add:
                        i = kmalloc(offsetof(struct journal_replay, j) +
                                    bytes, GFP_KERNEL);
                        if (!i)
                                return -ENOMEM;
                        memcpy(&i->j, j, bytes);
                        list_add(&i->list, where);
                        ret = 1;

                        ja->seq[bucket_index] = j->seq;
next_set:
                        offset  += blocks * ca->sb.block_size;
                        len     -= blocks * ca->sb.block_size;
                        j = ((void *) j) + blocks * block_bytes(ca);
                }
        }

        return ret;
}

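/*
 * Find all journal entries on every cache device. Buckets are probed in
 * golden-ratio-hash order to quickly hit one with valid entries, with a
 * linear scan as fallback; a binary search then narrows down the bucket
 * holding the newest entries, and the buckets before it are read backwards
 * until no more entries turn up.
 */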
int bch_journal_read(struct cache_set *c, struct list_head *list)
{
#define read_bucket(b)                                                  \
        ({                                                              \
                int ret = journal_read_bucket(ca, list, b);             \
                __set_bit(b, bitmap);                                   \
                if (ret < 0)                                            \
                        return ret;                                     \
                ret;                                                    \
        })

        struct cache *ca;
        unsigned iter;

        for_each_cache(ca, c, iter) {
                struct journal_device *ja = &ca->journal;
                DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
                unsigned i, l, r, m;
                uint64_t seq;

                bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
                pr_debug("%u journal buckets", ca->sb.njournal_buckets);

                /*
                 * Read journal buckets ordered by golden ratio hash to quickly
                 * find a sequence of buckets with valid journal entries
                 */
                for (i = 0; i < ca->sb.njournal_buckets; i++) {
                        l = (i * 2654435769U) % ca->sb.njournal_buckets;

                        if (test_bit(l, bitmap))
                                break;

                        if (read_bucket(l))
                                goto bsearch;
                }

                /*
                 * If that fails, check all the buckets we haven't checked
                 * already
                 */
                pr_debug("falling back to linear search");

                for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
                     l < ca->sb.njournal_buckets;
                     l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
                        if (read_bucket(l))
                                goto bsearch;

                /* no journal entries on this device? */
                if (l == ca->sb.njournal_buckets)
                        continue;
bsearch:
                BUG_ON(list_empty(list));

                /* Binary search */
                m = l;
                r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
                pr_debug("starting binary search, l %u r %u", l, r);

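                /*
                 * Invariant: bucket l is known to hold journal entries, while
                 * bucket r adds nothing newer. Reading the midpoint tells us
                 * whether it contains entries newer than everything seen so
                 * far (the tail of the list changes), and we narrow the range
                 * accordingly.
                 */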
                while (l + 1 < r) {
                        seq = list_entry(list->prev, struct journal_replay,
                                         list)->j.seq;

                        m = (l + r) >> 1;
                        read_bucket(m);

                        if (seq != list_entry(list->prev, struct journal_replay,
                                              list)->j.seq)
                                l = m;
                        else
                                r = m;
                }

                /*
                 * Read buckets in reverse order until we stop finding more
                 * journal entries
                 */
                pr_debug("finishing up: m %u njournal_buckets %u",
                         m, ca->sb.njournal_buckets);
                l = m;

                while (1) {
                        if (!l--)
                                l = ca->sb.njournal_buckets - 1;

                        if (l == m)
                                break;

                        if (test_bit(l, bitmap))
                                continue;

                        if (!read_bucket(l))
                                break;
                }

                seq = 0;

                for (i = 0; i < ca->sb.njournal_buckets; i++)
                        if (ja->seq[i] > seq) {
                                seq = ja->seq[i];
                                /*
                                 * When journal_reclaim() goes to allocate for
                                 * the first time, it'll use the bucket after
                                 * ja->cur_idx
                                 */
                                ja->cur_idx = i;
                                ja->last_idx = ja->discard_idx = (i + 1) %
                                        ca->sb.njournal_buckets;

                        }
        }

        if (!list_empty(list))
                c->journal.seq = list_entry(list->prev,
                                            struct journal_replay,
                                            list)->j.seq;

        return 0;
#undef read_bucket
}

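/*
 * Walk the replay list (newest to oldest) and rebuild the in-memory state the
 * entries depend on: recreate journal pin refcounts in the pin fifo and bump
 * the pin count of every bucket a journalled key points to, then mark the
 * keys so the initial GC accounting sees them.
 */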
void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
        atomic_t p = { 0 };
        struct bkey *k;
        struct journal_replay *i;
        struct journal *j = &c->journal;
        uint64_t last = j->seq;

        /*
         * journal.pin should never fill up - we never write a journal
         * entry when it would fill up. But if for some reason it does, we
         * iterate over the list in reverse order so that we can just skip that
         * refcount instead of bugging.
         */

        list_for_each_entry_reverse(i, list, list) {
                BUG_ON(last < i->j.seq);
                i->pin = NULL;

                while (last-- != i->j.seq)
                        if (fifo_free(&j->pin) > 1) {
                                fifo_push_front(&j->pin, p);
                                atomic_set(&fifo_front(&j->pin), 0);
                        }

                if (fifo_free(&j->pin) > 1) {
                        fifo_push_front(&j->pin, p);
                        i->pin = &fifo_front(&j->pin);
                        atomic_set(i->pin, 1);
                }

                for (k = i->j.start;
                     k < bset_bkey_last(&i->j);
                     k = bkey_next(k))
                        if (!__bch_extent_invalid(c, k)) {
                                unsigned j;

                                for (j = 0; j < KEY_PTRS(k); j++)
                                        if (ptr_available(c, k, j))
                                                atomic_inc(&PTR_BUCKET(c, k, j)->pin);

                                bch_initial_mark_key(c, 0, k);
                        }
        }
}

static bool is_discard_enabled(struct cache_set *s)
{
        struct cache *ca;
        unsigned int i;

        for_each_cache(ca, s, i)
                if (ca->discard)
                        return true;

        return false;
}

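/*
 * Reinsert every journalled key into the btree, oldest entry first. A gap in
 * the sequence numbers is tolerated (with a warning) when discards are
 * enabled, since discarded journal buckets can legitimately disappear;
 * otherwise a gap means entries were lost and replay fails with -EIO.
 */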
int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
        int ret = 0, keys = 0, entries = 0;
        struct bkey *k;
        struct journal_replay *i =
                list_entry(list->prev, struct journal_replay, list);

        uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
        struct keylist keylist;

        list_for_each_entry(i, list, list) {
                BUG_ON(i->pin && atomic_read(i->pin) != 1);

                if (n != i->j.seq) {
                        if (n == start && is_discard_enabled(s))
                                pr_info("bcache: journal entries %llu-%llu may be discarded! (replaying %llu-%llu)",
                                        n, i->j.seq - 1, start, end);
                        else {
                                pr_err("bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
                                        n, i->j.seq - 1, start, end);
                                ret = -EIO;
                                goto err;
                        }
                }

                for (k = i->j.start;
                     k < bset_bkey_last(&i->j);
                     k = bkey_next(k)) {
                        trace_bcache_journal_replay_key(k);

                        bch_keylist_init_single(&keylist, k);

                        ret = bch_btree_insert(s, &keylist, i->pin, NULL);
                        if (ret)
                                goto err;

                        BUG_ON(!bch_keylist_empty(&keylist));
                        keys++;

                        cond_resched();
                }

                if (i->pin)
                        atomic_dec(i->pin);
                n = i->j.seq + 1;
                entries++;
        }

        pr_info("journal replay done, %i keys in %i entries, seq %llu",
                keys, entries, end);
err:
        while (!list_empty(list)) {
                i = list_first_entry(list, struct journal_replay, list);
                list_del(&i->list);
                kfree(i);
        }

        return ret;
}

/* Journalling */

static void btree_flush_write(struct cache_set *c)
{
        /*
         * Try to find the btree node that references the oldest journal
         * entry; best is our current candidate:
         */
        struct btree *b, *best;
        unsigned i;
retry:
        best = NULL;

        for_each_cached_btree(b, c, i)
                if (btree_current_write(b)->journal) {
                        if (!best)
                                best = b;
                        else if (journal_pin_cmp(c,
                                        btree_current_write(best)->journal,
                                        btree_current_write(b)->journal)) {
                                best = b;
                        }
                }

        b = best;
        if (b) {
                mutex_lock(&b->write_lock);
                if (!btree_current_write(b)->journal) {
                        mutex_unlock(&b->write_lock);
                        /* We raced */
                        goto retry;
                }

                __bch_btree_node_write(b, NULL);
                mutex_unlock(&b->write_lock);
        }
}

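/*
 * Sequence number of the oldest journal entry that still has a pin in the
 * fifo: j->seq is the newest open entry and fifo_used() counts how many
 * entries are still pinned.
 */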
#define last_seq(j)     ((j)->seq - fifo_used(&(j)->pin) + 1)

static void journal_discard_endio(struct bio *bio)
{
        struct journal_device *ja =
                container_of(bio, struct journal_device, discard_bio);
        struct cache *ca = container_of(ja, struct cache, journal);

        atomic_set(&ja->discard_in_flight, DISCARD_DONE);

        closure_wake_up(&ca->set->journal.wait);
        closure_put(&ca->set->cl);
}

static void journal_discard_work(struct work_struct *work)
{
        struct journal_device *ja =
                container_of(work, struct journal_device, discard_work);

        submit_bio(0, &ja->discard_bio);
}

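/*
 * Discard state machine for a journal device: DISCARD_DONE means the
 * previous discard completed, so advance discard_idx and fall through to
 * DISCARD_READY, which issues a discard for the next reclaimed bucket (up to
 * last_idx); DISCARD_IN_FLIGHT means one is already outstanding, so do
 * nothing. Devices without discard support just track last_idx.
 */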
static void do_journal_discard(struct cache *ca)
{
        struct journal_device *ja = &ca->journal;
        struct bio *bio = &ja->discard_bio;

        if (!ca->discard) {
                ja->discard_idx = ja->last_idx;
                return;
        }

        switch (atomic_read(&ja->discard_in_flight)) {
        case DISCARD_IN_FLIGHT:
                return;

        case DISCARD_DONE:
                ja->discard_idx = (ja->discard_idx + 1) %
                        ca->sb.njournal_buckets;

                atomic_set(&ja->discard_in_flight, DISCARD_READY);
                /* fallthrough */

        case DISCARD_READY:
                if (ja->discard_idx == ja->last_idx)
                        return;

                atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

                bio_init(bio);
                bio->bi_iter.bi_sector  = bucket_to_sector(ca->set,
                                                ca->sb.d[ja->discard_idx]);
                bio->bi_bdev            = ca->bdev;
                bio->bi_rw              = REQ_WRITE|REQ_DISCARD;
                bio->bi_max_vecs        = 1;
                bio->bi_io_vec          = bio->bi_inline_vecs;
                bio->bi_iter.bi_size    = bucket_bytes(ca);
                bio->bi_end_io          = journal_discard_endio;

                closure_get(&ca->set->cl);
                INIT_WORK(&ja->discard_work, journal_discard_work);
                schedule_work(&ja->discard_work);
        }
}

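/*
 * Free up journal space: pop pin fifo entries whose refcount has dropped to
 * zero, advance each device's last_idx past buckets whose newest entry is
 * older than the oldest pinned sequence number, kick off discards, and if
 * the open journal entry has no blocks left, pick the next bucket on each
 * device for the journal key.
 */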
static void journal_reclaim(struct cache_set *c)
{
        struct bkey *k = &c->journal.key;
        struct cache *ca;
        uint64_t last_seq;
        unsigned iter, n = 0;
        atomic_t p;

        while (!atomic_read(&fifo_front(&c->journal.pin)))
                fifo_pop(&c->journal.pin, p);

        last_seq = last_seq(&c->journal);

        /* Update last_idx */

        for_each_cache(ca, c, iter) {
                struct journal_device *ja = &ca->journal;

                while (ja->last_idx != ja->cur_idx &&
                       ja->seq[ja->last_idx] < last_seq)
                        ja->last_idx = (ja->last_idx + 1) %
                                ca->sb.njournal_buckets;
        }

        for_each_cache(ca, c, iter)
                do_journal_discard(ca);

        if (c->journal.blocks_free)
                goto out;

        /*
         * Allocate:
         * XXX: Sort by free journal space
         */

        for_each_cache(ca, c, iter) {
                struct journal_device *ja = &ca->journal;
                unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;

                /* No space available on this device */
                if (next == ja->discard_idx)
                        continue;

                ja->cur_idx = next;
                k->ptr[n++] = MAKE_PTR(0,
                                  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
                                  ca->sb.nr_this_dev);
        }

        if (n) {
                bkey_init(k);
                SET_KEY_PTRS(k, n);
                c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
        }
out:
        if (!journal_full(&c->journal))
                __closure_wake_up(&c->journal.wait);
}

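/*
 * Close the current in-memory journal entry and switch to the other write
 * buffer, pushing a fresh pin (refcount 1) for the new sequence number.
 */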
void bch_journal_next(struct journal *j)
{
        atomic_t p = { 1 };

        j->cur = (j->cur == j->w)
                ? &j->w[1]
                : &j->w[0];

        /*
         * The fifo_push() needs to happen at the same time as j->seq is
         * incremented for last_seq() to be calculated correctly
         */
        BUG_ON(!fifo_push(&j->pin, p));
        atomic_set(&fifo_back(&j->pin), 1);

        j->cur->data->seq       = ++j->seq;
        j->cur->dirty           = false;
        j->cur->need_write      = false;
        j->cur->data->keys      = 0;

        if (fifo_full(&j->pin))
                pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
}

static void journal_write_endio(struct bio *bio)
{
        struct journal_write *w = bio->bi_private;

        cache_set_err_on(bio->bi_error, w->c, "journal io error");
        closure_put(&w->c->journal.io);
}

static void journal_write(struct closure *);

static void journal_write_done(struct closure *cl)
{
        struct journal *j = container_of(cl, struct journal, io);
        struct journal_write *w = (j->cur == j->w)
                ? &j->w[1]
                : &j->w[0];

        __closure_wake_up(&w->wait);
        continue_at_nobarrier(cl, journal_write, system_wq);
}

static void journal_write_unlock(struct closure *cl)
{
        struct cache_set *c = container_of(cl, struct cache_set, journal.io);

        c->journal.io_in_flight = 0;
        spin_unlock(&c->journal.lock);
}

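/*
 * Write out the current journal entry. Called with journal.lock held, which
 * it drops: fill in the jset header (btree root, uuid bucket, prio buckets,
 * magic, version, last_seq, checksum), issue one FLUSH/FUA write per pointer
 * in the journal key, advance each pointer by the sectors written, then
 * switch buffers and reclaim. If the journal is full, reclaim and flush
 * btree nodes first and retry from journal_write().
 */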
static void journal_write_unlocked(struct closure *cl)
        __releases(c->journal.lock)
{
        struct cache_set *c = container_of(cl, struct cache_set, journal.io);
        struct cache *ca;
        struct journal_write *w = c->journal.cur;
        struct bkey *k = &c->journal.key;
        unsigned i, sectors = set_blocks(w->data, block_bytes(c)) *
                c->sb.block_size;

        struct bio *bio;
        struct bio_list list;
        bio_list_init(&list);

        if (!w->need_write) {
                closure_return_with_destructor(cl, journal_write_unlock);
                return;
        } else if (journal_full(&c->journal)) {
                journal_reclaim(c);
                spin_unlock(&c->journal.lock);

                btree_flush_write(c);
                continue_at(cl, journal_write, system_wq);
                return;
        }

        c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));

        w->data->btree_level = c->root->level;

        bkey_copy(&w->data->btree_root, &c->root->key);
        bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

        for_each_cache(ca, c, i)
                w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];

        w->data->magic          = jset_magic(&c->sb);
        w->data->version        = BCACHE_JSET_VERSION;
        w->data->last_seq       = last_seq(&c->journal);
        w->data->csum           = csum_set(w->data);

        for (i = 0; i < KEY_PTRS(k); i++) {
                ca = PTR_CACHE(c, k, i);
                bio = &ca->journal.bio;

                atomic_long_add(sectors, &ca->meta_sectors_written);

                bio_reset(bio);
                bio->bi_iter.bi_sector  = PTR_OFFSET(k, i);
                bio->bi_bdev    = ca->bdev;
                bio->bi_rw      = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
                bio->bi_iter.bi_size = sectors << 9;

                bio->bi_end_io  = journal_write_endio;
                bio->bi_private = w;
                bch_bio_map(bio, w->data);

                trace_bcache_journal_write(bio);
                bio_list_add(&list, bio);

                SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

                ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
        }

        /* If KEY_PTRS(k) == 0, no copies get written and this jset is lost */
        BUG_ON(i == 0);

        atomic_dec_bug(&fifo_back(&c->journal.pin));
        bch_journal_next(&c->journal);
        journal_reclaim(c);

        spin_unlock(&c->journal.lock);

        while ((bio = bio_list_pop(&list)))
                closure_bio_submit(bio, cl);

        continue_at(cl, journal_write_done, NULL);
}

static void journal_write(struct closure *cl)
{
        struct cache_set *c = container_of(cl, struct cache_set, journal.io);

        spin_lock(&c->journal.lock);
        journal_write_unlocked(cl);
}

static void journal_try_write(struct cache_set *c)
        __releases(c->journal.lock)
{
        struct closure *cl = &c->journal.io;
        struct journal_write *w = c->journal.cur;

        w->need_write = true;

        if (!c->journal.io_in_flight) {
                c->journal.io_in_flight = 1;
                closure_call(cl, journal_write_unlocked, NULL, &c->cl);
        } else {
                spin_unlock(&c->journal.lock);
        }
}

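/*
 * Wait until the open journal entry has room for nkeys more keys, returning
 * with journal.lock held. If the current entry is too full but the journal
 * itself is not, start writing it out; if the journal is full, reclaim space
 * and flush btree nodes that pin the oldest entries, then block on the
 * journal waitlist and retry.
 */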
static struct journal_write *journal_wait_for_write(struct cache_set *c,
                                                    unsigned nkeys)
{
        size_t sectors;
        struct closure cl;
        bool wait = false;

        closure_init_stack(&cl);

        spin_lock(&c->journal.lock);

        while (1) {
                struct journal_write *w = c->journal.cur;

                sectors = __set_blocks(w->data, w->data->keys + nkeys,
                                       block_bytes(c)) * c->sb.block_size;

                if (sectors <= min_t(size_t,
                                     c->journal.blocks_free * c->sb.block_size,
                                     PAGE_SECTORS << JSET_BITS))
                        return w;

                if (wait)
                        closure_wait(&c->journal.wait, &cl);

                if (!journal_full(&c->journal)) {
                        if (wait)
                                trace_bcache_journal_entry_full(c);

                        /*
                         * XXX: If we were inserting so many keys that they
                         * won't fit in an _empty_ journal write, we'll
                         * deadlock. For now, handle this in
                         * bch_keylist_realloc() - but something to think about.
                         */
                        BUG_ON(!w->data->keys);

                        journal_try_write(c); /* unlocks */
                } else {
                        if (wait)
                                trace_bcache_journal_full(c);

                        journal_reclaim(c);
                        spin_unlock(&c->journal.lock);

                        btree_flush_write(c);
                }

                closure_sync(&cl);
                spin_lock(&c->journal.lock);
                wait = true;
        }
}

static void journal_write_work(struct work_struct *work)
{
        struct cache_set *c = container_of(to_delayed_work(work),
                                           struct cache_set,
                                           journal.work);
        spin_lock(&c->journal.lock);
        if (c->journal.cur->dirty)
                journal_try_write(c);
        else
                spin_unlock(&c->journal.lock);
}

/*
 * Entry point to the journalling code - bio_insert() and btree_invalidate()
 * pass bch_journal() a list of keys to be journalled, and then
 * bch_journal() hands those same keys off to btree_insert_async()
 */

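/*
 * Returns a pointer to the journal pin refcount for the entry the keys were
 * added to (or NULL if the cache set is not in synchronous mode); the caller
 * drops it once the keys no longer need that journal entry kept alive.
 */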
atomic_t *bch_journal(struct cache_set *c,
                      struct keylist *keys,
                      struct closure *parent)
{
        struct journal_write *w;
        atomic_t *ret;

        if (!CACHE_SYNC(&c->sb))
                return NULL;

        w = journal_wait_for_write(c, bch_keylist_nkeys(keys));

        memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
        w->data->keys += bch_keylist_nkeys(keys);

        ret = &fifo_back(&c->journal.pin);
        atomic_inc(ret);

        if (parent) {
                closure_wait(&w->wait, parent);
                journal_try_write(c);
        } else if (!w->dirty) {
                w->dirty = true;
                schedule_delayed_work(&c->journal.work,
                                      msecs_to_jiffies(c->journal_delay_ms));
                spin_unlock(&c->journal.lock);
        } else {
                spin_unlock(&c->journal.lock);
        }

        return ret;
}

void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
        struct keylist keys;
        atomic_t *ref;

        bch_keylist_init(&keys);

        ref = bch_journal(c, &keys, cl);
        if (ref)
                atomic_dec_bug(ref);
}

void bch_journal_free(struct cache_set *c)
{
        free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
        free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
        free_fifo(&c->journal.pin);
}

int bch_journal_alloc(struct cache_set *c)
{
        struct journal *j = &c->journal;

        spin_lock_init(&j->lock);
        INIT_DELAYED_WORK(&j->work, journal_write_work);

        c->journal_delay_ms = 100;

        j->w[0].c = c;
        j->w[1].c = c;

        if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
            !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)) ||
            !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)))
                return -ENOMEM;

        return 0;
}