// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/kthread.h>
#include <linux/pagemap.h>

#include "ctree.h"
#include "disk-io.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "transaction.h"

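/*
 * Called when the inode caching kthread cannot be started or cannot
 * allocate what it needs: disable inode map caching for this mount,
 * mark the cache state as errored and wake up any waiters so they can
 * fall back to btrfs_find_free_objectid().
 */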
static void fail_caching_thread(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	btrfs_warn(fs_info, "failed to start inode caching task");
	btrfs_clear_pending_and_info(fs_info, INODE_MAP_CACHE,
				     "disabling inode map caching");
	spin_lock(&root->ino_cache_lock);
	root->ino_cache_state = BTRFS_CACHE_ERROR;
	spin_unlock(&root->ino_cache_lock);
	wake_up(&root->ino_cache_wait);
}

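/*
 * Worker thread that populates root->free_ino_ctl: walk all inode
 * items in the commit root and record the gaps between allocated
 * inode numbers as free space. The path and commit_root_sem are
 * dropped periodically so we don't stall transaction commits. Numbers
 * at or above root->highest_objectid were already added by
 * start_caching(), which is where the scan stops.
 */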
static int caching_kthread(void *data)
{
	struct btrfs_root *root = data;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	u64 last = (u64)-1;
	int slot;
	int ret;

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		fail_caching_thread(root);
		return -ENOMEM;
	}

	/* Since the commit root is read-only, we can safely skip locking. */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_INODE_ITEM_KEY;
again:
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->commit_root_sem);

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		if (btrfs_fs_closing(fs_info))
			goto out;

		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;

			if (need_resched() ||
			    btrfs_transaction_in_commit(fs_info)) {
				leaf = path->nodes[0];

				if (WARN_ON(btrfs_header_nritems(leaf) == 0))
					break;

				/*
				 * Save the key so we can advance forward
				 * in the next search.
				 */
				btrfs_item_key_to_cpu(leaf, &key, 0);
				btrfs_release_path(path);
				root->ino_cache_progress = last;
				up_read(&fs_info->commit_root_sem);
				schedule_timeout(1);
				goto again;
			} else
				continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.type != BTRFS_INODE_ITEM_KEY)
			goto next;

		if (key.objectid >= root->highest_objectid)
			break;

		if (last != (u64)-1 && last + 1 != key.objectid) {
			__btrfs_add_free_space(fs_info, ctl, last + 1,
					       key.objectid - last - 1);
			wake_up(&root->ino_cache_wait);
		}

		last = key.objectid;
next:
		path->slots[0]++;
	}

	if (last < root->highest_objectid - 1) {
		__btrfs_add_free_space(fs_info, ctl, last + 1,
				       root->highest_objectid - last - 1);
	}

	spin_lock(&root->ino_cache_lock);
	root->ino_cache_state = BTRFS_CACHE_FINISHED;
	spin_unlock(&root->ino_cache_lock);

	root->ino_cache_progress = (u64)-1;
	btrfs_unpin_free_ino(root);
out:
	wake_up(&root->ino_cache_wait);
	up_read(&fs_info->commit_root_sem);

	btrfs_free_path(path);

	return ret;
}

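/*
 * Begin caching free inode numbers for this root. First try to load a
 * cache previously saved on disk; if that is not available, publish
 * the whole range above the current highest objectid right away so
 * allocations don't have to wait, and let caching_kthread fill in the
 * gaps below it in the background.
 */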
static void start_caching(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct task_struct *tsk;
	int ret;
	u64 objectid;

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return;

	spin_lock(&root->ino_cache_lock);
	if (root->ino_cache_state != BTRFS_CACHE_NO) {
		spin_unlock(&root->ino_cache_lock);
		return;
	}

	root->ino_cache_state = BTRFS_CACHE_STARTED;
	spin_unlock(&root->ino_cache_lock);

	ret = load_free_ino_cache(fs_info, root);
	if (ret == 1) {
		spin_lock(&root->ino_cache_lock);
		root->ino_cache_state = BTRFS_CACHE_FINISHED;
		spin_unlock(&root->ino_cache_lock);
		wake_up(&root->ino_cache_wait);
		return;
	}

	/*
	 * It can be quite time-consuming to fill the cache by searching
	 * through the extent tree, and this can keep the ino allocation path
	 * waiting. Therefore at the start we quickly find out the highest
	 * inode number and we know we can use inode numbers which fall in
	 * [highest_ino + 1, BTRFS_LAST_FREE_OBJECTID].
	 */
	ret = btrfs_find_free_objectid(root, &objectid);
	if (!ret && objectid <= BTRFS_LAST_FREE_OBJECTID) {
		__btrfs_add_free_space(fs_info, ctl, objectid,
				       BTRFS_LAST_FREE_OBJECTID - objectid + 1);
	}

	tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu",
			  root->root_key.objectid);
	if (IS_ERR(tsk))
		fail_caching_thread(root);
}

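/*
 * Pick a free inode number for a new inode. With the inode map cache
 * disabled this just bumps root->highest_objectid; otherwise the
 * lowest cached number is taken, starting the caching thread and
 * waiting for it if the cache has nothing to offer yet.
 */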
int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
{
	if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
		return btrfs_find_free_objectid(root, objectid);

again:
	*objectid = btrfs_find_ino_for_alloc(root);

	if (*objectid != 0)
		return 0;

	start_caching(root);

	wait_event(root->ino_cache_wait,
		   root->ino_cache_state == BTRFS_CACHE_FINISHED ||
		   root->ino_cache_state == BTRFS_CACHE_ERROR ||
		   root->free_ino_ctl->free_space > 0);

	if (root->ino_cache_state == BTRFS_CACHE_FINISHED &&
	    root->free_ino_ctl->free_space == 0)
		return -ENOSPC;
	else if (root->ino_cache_state == BTRFS_CACHE_ERROR)
		return btrfs_find_free_objectid(root, objectid);
	else
		goto again;
}

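/*
 * Give an inode number back to the cache, e.g. when inode creation is
 * aborted. The number is recorded in the pinned tree rather than in
 * free_ino_ctl directly, since the caching thread may still be
 * scanning the old commit root; btrfs_unpin_free_ino() migrates the
 * pinned numbers at transaction commit.
 */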
void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return;
again:
	if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
		__btrfs_add_free_space(fs_info, pinned, objectid, 1);
	} else {
		down_write(&fs_info->commit_root_sem);
		spin_lock(&root->ino_cache_lock);
		if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
			spin_unlock(&root->ino_cache_lock);
			up_write(&fs_info->commit_root_sem);
			goto again;
		}
		spin_unlock(&root->ino_cache_lock);

		start_caching(root);

		__btrfs_add_free_space(fs_info, pinned, objectid, 1);

		up_write(&fs_info->commit_root_sem);
	}
}

/*
 * When a transaction is committed, we'll move the inode numbers which are
 * smaller than root->ino_cache_progress from the pinned tree to the free_ino
 * tree, and the others will just be dropped, because the commit root we were
 * searching has changed.
 *
 * Must be called with root->fs_info->commit_root_sem held.
 */
void btrfs_unpin_free_ino(struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset;
	spinlock_t *rbroot_lock = &root->free_ino_pinned->tree_lock;
	struct btrfs_free_space *info;
	struct rb_node *n;
	u64 count;

	if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
		return;

	while (1) {
		spin_lock(rbroot_lock);
		n = rb_first(rbroot);
		if (!n) {
			spin_unlock(rbroot_lock);
			break;
		}

		info = rb_entry(n, struct btrfs_free_space, offset_index);
		BUG_ON(info->bitmap); /* Logic error */

		if (info->offset > root->ino_cache_progress)
			count = 0;
		else
			count = min(root->ino_cache_progress - info->offset + 1,
				    info->bytes);

		rb_erase(&info->offset_index, rbroot);
		spin_unlock(rbroot_lock);
		if (count)
			__btrfs_add_free_space(root->fs_info, ctl,
					       info->offset, count);
		kmem_cache_free(btrfs_free_space_cachep, info);
	}
}

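/*
 * INIT_THRESHOLD is the number of extent entries that fit into 16K of
 * RAM (half of SZ_32K); a single page-sized bitmap covers
 * PAGE_SIZE * 8 inode numbers.
 */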
#define INIT_THRESHOLD	((SZ_32K / 2) / sizeof(struct btrfs_free_space))
#define INODES_PER_BITMAP (PAGE_SIZE * 8)

/*
 * The goal is to keep the memory used by the free_ino tree from exceeding
 * the memory we would use if we stored the same information in bitmaps only.
 */
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *n;
	int max_ino;
	int max_bitmaps;

	n = rb_last(&ctl->free_space_offset);
	if (!n) {
		ctl->extents_thresh = INIT_THRESHOLD;
		return;
	}
	info = rb_entry(n, struct btrfs_free_space, offset_index);

	/*
	 * Find the maximum inode number in the filesystem. Note we
	 * ignore the fact that this can be a bitmap, because we are
	 * not doing a precise calculation.
	 */
	max_ino = info->bytes - 1;

	max_bitmaps = ALIGN(max_ino, INODES_PER_BITMAP) / INODES_PER_BITMAP;
	if (max_bitmaps <= ctl->total_bitmaps) {
		ctl->extents_thresh = 0;
		return;
	}

	ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) *
				PAGE_SIZE / sizeof(*info);
}

/*
 * We don't fall back to bitmaps if we are below the extents threshold
 * or if this chunk of inode numbers is a big one.
 */
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		       struct btrfs_free_space *info)
{
	if (ctl->free_extents < ctl->extents_thresh ||
	    info->bytes > INODES_PER_BITMAP / 10)
		return false;

	return true;
}

static const struct btrfs_free_space_op free_ino_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};

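/* The pinned tree only ever uses extents, so there is nothing to adjust. */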
static void pinned_recalc_thresholds(struct btrfs_free_space_ctl *ctl)
{
}

static bool pinned_use_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	/*
	 * We always use extents for two reasons:
	 *
	 * - The pinned tree is only used while the caching work is in
	 *   progress.
	 * - It keeps the code simpler. See btrfs_unpin_free_ino().
	 */
	return false;
}

static const struct btrfs_free_space_op pinned_free_ino_op = {
	.recalc_thresholds	= pinned_recalc_thresholds,
	.use_bitmap		= pinned_use_bitmap,
};

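/*
 * Set up the two in-memory trees backing the inode map cache: the
 * free_ino tree that allocations draw from, and the pinned tree that
 * holds returned numbers until btrfs_unpin_free_ino() releases them.
 */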
void btrfs_init_free_ino_ctl(struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;

	spin_lock_init(&ctl->tree_lock);
	ctl->unit = 1;
	ctl->start = 0;
	ctl->private = NULL;
	ctl->op = &free_ino_op;
	INIT_LIST_HEAD(&ctl->trimming_ranges);
	mutex_init(&ctl->cache_writeout_mutex);

	/*
	 * Initially we allow using 16K of RAM to cache chunks of inode
	 * numbers before we resort to bitmaps. This is somewhat arbitrary,
	 * but it will be adjusted at runtime.
	 */
	ctl->extents_thresh = INIT_THRESHOLD;

	spin_lock_init(&pinned->tree_lock);
	pinned->unit = 1;
	pinned->start = 0;
	pinned->private = NULL;
	pinned->extents_thresh = 0;
	pinned->op = &pinned_free_ino_op;
}

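/*
 * Write the in-memory free inode cache into this root's free space
 * cache inode at transaction commit time, preallocating enough file
 * space up front for every extent and bitmap entry currently held in
 * the tree.
 */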
int btrfs_save_ino_cache(struct btrfs_root *root,
			 struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_path *path;
	struct inode *inode;
	struct btrfs_block_rsv *rsv;
	struct extent_changeset *data_reserved = NULL;
	u64 num_bytes;
	u64 alloc_hint = 0;
	int ret;
	int prealloc;
	bool retry = false;

	/* Only the fs tree and subvol/snap roots need the ino cache. */
	if (root->root_key.objectid != BTRFS_FS_TREE_OBJECTID &&
	    (root->root_key.objectid < BTRFS_FIRST_FREE_OBJECTID ||
	     root->root_key.objectid > BTRFS_LAST_FREE_OBJECTID))
		return 0;

	/* Don't save the inode cache if we are deleting this root. */
	if (btrfs_root_refs(&root->root_item) == 0)
		return 0;

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->trans_block_rsv;

	num_bytes = trans->bytes_reserved;
	/*
	 * 1 item for inode item insertion if needed
	 * 4 items for inode item update (in the worst case)
	 * 1 item for slack space if we need to do truncation
	 * 1 item for the free space object
	 * 3 items for pre-allocation
	 */
	trans->bytes_reserved = btrfs_calc_trans_metadata_size(fs_info, 10);
	ret = btrfs_block_rsv_add(root, trans->block_rsv,
				  trans->bytes_reserved,
				  BTRFS_RESERVE_NO_FLUSH);
	if (ret)
		goto out;
	trace_btrfs_space_reservation(fs_info, "ino_cache", trans->transid,
				      trans->bytes_reserved, 1);
again:
	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode) && (PTR_ERR(inode) != -ENOENT || retry)) {
		ret = PTR_ERR(inode);
		goto out_release;
	}

	if (IS_ERR(inode)) {
		BUG_ON(retry); /* Logic error */
		retry = true;

		ret = create_free_ino_inode(root, trans, path);
		if (ret)
			goto out_release;
		goto again;
	}

	BTRFS_I(inode)->generation = 0;
	ret = btrfs_update_inode(trans, root, inode);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_put;
	}

	if (i_size_read(inode) > 0) {
		ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
		if (ret) {
			if (ret != -ENOSPC)
				btrfs_abort_transaction(trans, ret);
			goto out_put;
		}
	}

	spin_lock(&root->ino_cache_lock);
	if (root->ino_cache_state != BTRFS_CACHE_FINISHED) {
		ret = -1;
		spin_unlock(&root->ino_cache_lock);
		goto out_put;
	}
	spin_unlock(&root->ino_cache_lock);

	spin_lock(&ctl->tree_lock);
	prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents;
	prealloc = ALIGN(prealloc, PAGE_SIZE);
	prealloc += ctl->total_bitmaps * PAGE_SIZE;
	spin_unlock(&ctl->tree_lock);

	/* Just to make sure we have enough space. */
	prealloc += 8 * PAGE_SIZE;

	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, 0, prealloc);
	if (ret)
		goto out_put;

	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
					      prealloc, prealloc, &alloc_hint);
	if (ret) {
		btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc);
		btrfs_delalloc_release_metadata(BTRFS_I(inode), prealloc, true);
		goto out_put;
	}

	ret = btrfs_write_out_ino_cache(root, trans, path, inode);
	btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc);
out_put:
	iput(inode);
out_release:
	trace_btrfs_space_reservation(fs_info, "ino_cache", trans->transid,
				      trans->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, trans->block_rsv,
				trans->bytes_reserved);
out:
	trans->block_rsv = rsv;
	trans->bytes_reserved = num_bytes;

	btrfs_free_path(path);
	extent_changeset_free(data_reserved);
	return ret;
}

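/*
 * Look up the highest objectid currently in use in this root by
 * finding the last key before BTRFS_LAST_FREE_OBJECTID, clamped to at
 * least BTRFS_FIRST_FREE_OBJECTID - 1.
 */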
int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
{
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *l;
	struct btrfs_key search_key;
	struct btrfs_key found_key;
	int slot;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Search for the highest possible key with objectid
	 * BTRFS_LAST_FREE_OBJECTID (type is a u8, so -1 wraps to the
	 * largest value).
	 */
	search_key.objectid = BTRFS_LAST_FREE_OBJECTID;
	search_key.type = -1;
	search_key.offset = (u64)-1;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto error;
	BUG_ON(ret == 0); /* Corruption */
	if (path->slots[0] > 0) {
		slot = path->slots[0] - 1;
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);
		*objectid = max_t(u64, found_key.objectid,
				  BTRFS_FIRST_FREE_OBJECTID - 1);
	} else {
		*objectid = BTRFS_FIRST_FREE_OBJECTID - 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

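/*
 * Hand out the next objectid above the highest one in use, failing
 * with -ENOSPC once root->highest_objectid has reached
 * BTRFS_LAST_FREE_OBJECTID.
 */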
int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
{
	int ret;

	mutex_lock(&root->objectid_mutex);

	if (unlikely(root->highest_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
		btrfs_warn(root->fs_info,
			   "the objectid of root %llu reaches its highest value",
			   root->root_key.objectid);
		ret = -ENOSPC;
		goto out;
	}

	*objectid = ++root->highest_objectid;
	ret = 0;
out:
	mutex_unlock(&root->objectid_mutex);
	return ret;
}