GNU Linux-libre 6.0.2-gnu: fs/btrfs/space-info.c
1 // SPDX-License-Identifier: GPL-2.0
2
3 #include "misc.h"
4 #include "ctree.h"
5 #include "space-info.h"
6 #include "sysfs.h"
7 #include "volumes.h"
8 #include "free-space-cache.h"
9 #include "ordered-data.h"
10 #include "transaction.h"
11 #include "block-group.h"
12 #include "zoned.h"
13
14 /*
15  * HOW DOES SPACE RESERVATION WORK
16  *
17  * If you want to know about delalloc specifically, there is a separate comment
18  * for that with the delalloc code.  This comment is about how the whole system
19  * works generally.
20  *
21  * BASIC CONCEPTS
22  *
23  *   1) space_info.  This is the ultimate arbiter of how much space we can use.
24  *   There's a description of the bytes_ fields with the struct declaration,
25  *   refer to that for specifics on each field.  Suffice it to say that for
26  *   reservations we care about total_bytes - SUM(space_info->bytes_) when
27  *   determining if there is space to make an allocation.  There is a space_info
28  *   for METADATA, SYSTEM, and DATA areas.
29  *
30  *   2) block_rsv's.  These are basically buckets for every different type of
31  *   metadata reservation we have.  You can see the comment in the block_rsv
32  *   code on the rules for each type, but generally block_rsv->reserved is how
33  *   much space is accounted for in space_info->bytes_may_use.
34  *
35  *   3) btrfs_calc*_size.  These are the worst case calculations we use based
36  *   on the number of items we will want to modify.  We have one for changing
37  *   items, and one for inserting new items.  Generally we use these helpers to
38  *   determine the size of the block reserves, and then use the actual bytes
39  *   values to adjust the space_info counters.
40  *
41  * MAKING RESERVATIONS, THE NORMAL CASE
42  *
43  *   We call into either btrfs_reserve_data_bytes() or
44  *   btrfs_reserve_metadata_bytes(), depending on which we're looking for, with
45  *   num_bytes we want to reserve.
46  *
47  *   ->reserve
48  *     space_info->bytes_may_use += num_bytes
49  *
50  *   ->extent allocation
51  *     Call btrfs_add_reserved_bytes() which does
52  *     space_info->bytes_may_use -= num_bytes
53  *     space_info->bytes_reserved += extent_bytes
54  *
55  *   ->insert reference
56  *     Call btrfs_update_block_group() which does
57  *     space_info->bytes_reserved -= extent_bytes
58  *     space_info->bytes_used += extent_bytes
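 *
 *   A rough illustration (made-up numbers, not taken from the code): reserving
 *   num_bytes = 1M bumps bytes_may_use by 1M; when a 1M extent is then
 *   allocated, bytes_may_use drops by 1M and bytes_reserved grows by 1M; and
 *   when the reference is inserted, bytes_reserved drops by 1M and bytes_used
 *   grows by 1M, so the reservation simply migrates between the counters.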
59  *
60  * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
61  *
62  *   Assume we are unable to simply make the reservation because we do not have
63  *   enough space
64  *
65  *   -> __reserve_bytes
66  *     create a reserve_ticket with ->bytes set to our reservation, add it to
67  *     the tail of space_info->tickets, kick async flush thread
68  *
69  *   ->handle_reserve_ticket
70  *     wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set
71  *     on the ticket.
72  *
73  *   -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
74  *     Flushes various things attempting to free up space.
75  *
76  *   -> btrfs_try_granting_tickets()
77  *     This is called by anything that either subtracts space from
78  *     space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
79  *     space_info->total_bytes.  This loops through the ->priority_tickets and
80  *     then the ->tickets list checking to see if the reservation can be
81  *     completed.  If it can the space is added to space_info->bytes_may_use and
82  *     the ticket is woken up.
83  *
84  *   -> ticket wakeup
85  *     Check if ->bytes == 0, if it is we got our reservation and we can carry
86  *     on, if not return the appropriate error (ENOSPC, but can be EINTR if we
87  *     were interrupted.)
88  *
89  * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
90  *
91  *   Same as the above, except we add ourselves to the
92  *   space_info->priority_tickets, and we do not use ticket->wait, we simply
93  *   call flush_space() ourselves for the states that are safe for us to call
94  *   without deadlocking and hope for the best.
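 *
 *   A rough sketch of that priority path (not the exact code; see the
 *   priority_flush_states array further down for the states it walks):
 *
 *     to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
 *     for each state in priority_flush_states:
 *             flush_space(fs_info, space_info, to_reclaim, state, false);
 *             if (ticket->bytes == 0)
 *                     break;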
95  *
96  * THE FLUSHING STATES
97  *
98  *   Generally speaking we will have two cases for each state, a "nice" state
99  *   and an "ALL THE THINGS" state.  In btrfs we delay a lot of work in order to
100  *   reduce the locking overhead on the various trees, and even to keep from
101  *   doing any work at all in the case of delayed refs.  Each of these delayed
102  *   things, however, holds reservations, and so letting them run allows us to
103  *   reclaim space so we can make new reservations.
104  *
105  *   FLUSH_DELAYED_ITEMS
106  *     Every inode has a delayed item to update the inode.  Take a simple write
107  *     for example, we would update the inode item at write time to update the
108  *     mtime, and then again at finish_ordered_io() time in order to update the
109  *     isize or bytes.  We keep these delayed items to coalesce these operations
110  *     into a single operation done on demand.  These are an easy way to reclaim
111  *     metadata space.
112  *
113  *   FLUSH_DELALLOC
114  *     Look at the delalloc comment to get an idea of how much space is reserved
115  *     for delayed allocation.  We can reclaim some of this space simply by
116  *     running delalloc, but usually we need to wait for ordered extents to
117  *     reclaim the bulk of this space.
118  *
119  *   FLUSH_DELAYED_REFS
120  *     We have a block reserve for the outstanding delayed refs space, and every
121  *     delayed ref operation holds a reservation.  Running these is a quick way
122  *     to reclaim space, but we want to hold this until the end because COW can
123  *     churn a lot and we can avoid making some extent tree modifications if we
124  *     are able to delay for as long as possible.
125  *
126  *   ALLOC_CHUNK
127  *     We will skip this the first time through space reservation, because of
128  *     overcommit and we don't want to have a lot of useless metadata space when
129  *     our worst case reservations will likely never come true.
130  *
131  *   RUN_DELAYED_IPUTS
132  *     If we're freeing inodes we're likely freeing checksums, file extent
133  *     items, and extent tree items.  Loads of space could be freed up by these
134  *     operations, however they won't be usable until the transaction commits.
135  *
136  *   COMMIT_TRANS
137  *     This will commit the transaction.  Historically we had a lot of logic
138  *     surrounding whether or not we'd commit the transaction, but this was born
139  *     out of a pre-tickets era where we could end up committing the transaction
140  *     thousands of times in a row without making progress.  Now thanks to our
141  *     ticketing system we know if we're not making progress and can error
142  *     everybody out after a few commits rather than burning the disk hoping for
143  *     a different answer.
144  *
145  * OVERCOMMIT
146  *
147  *   Because we hold so many reservations for metadata we will allow you to
148  *   reserve more space than is currently free in the currently allocated
149  *   metadata space.  This only happens with metadata, data does not allow
150  *   overcommitting.
151  *
152  *   You can see the current logic for when we allow overcommit in
153  *   btrfs_can_overcommit(), but it only applies to unallocated space.  If there
154  *   is no unallocated space to be had, all reservations are kept within the
155  *   free space in the allocated metadata chunks.
156  *
157  *   Because of overcommitting, you generally want to use the
158  *   btrfs_can_overcommit() logic for metadata allocations, as it does the right
159  *   thing with or without extra unallocated space.
160  */
161
162 u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
163                           bool may_use_included)
164 {
165         ASSERT(s_info);
166         return s_info->bytes_used + s_info->bytes_reserved +
167                 s_info->bytes_pinned + s_info->bytes_readonly +
168                 s_info->bytes_zone_unusable +
169                 (may_use_included ? s_info->bytes_may_use : 0);
170 }
171
172 /*
173  * after adding space to the filesystem, we need to clear the full flags
174  * on all the space infos.
175  */
176 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
177 {
178         struct list_head *head = &info->space_info;
179         struct btrfs_space_info *found;
180
181         list_for_each_entry(found, head, list)
182                 found->full = 0;
183 }
184
185 /*
186  * Block groups with more than this percentage of unusable space will be
187  * scheduled for background reclaim.
188  */
189 #define BTRFS_DEFAULT_ZONED_RECLAIM_THRESH                      (75)
190
191 /*
192  * Calculate chunk size depending on volume type (regular or zoned).
193  */
194 static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags)
195 {
196         if (btrfs_is_zoned(fs_info))
197                 return fs_info->zone_size;
198
199         ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
200
201         if (flags & BTRFS_BLOCK_GROUP_DATA)
202                 return BTRFS_MAX_DATA_CHUNK_SIZE;
203         else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
204                 return SZ_32M;
205
206         /* Handle BTRFS_BLOCK_GROUP_METADATA */
207         if (fs_info->fs_devices->total_rw_bytes > 50ULL * SZ_1G)
208                 return SZ_1G;
209
210         return SZ_256M;
211 }
212
213 /*
214  * Update default chunk size.
215  */
216 void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
217                                         u64 chunk_size)
218 {
219         WRITE_ONCE(space_info->chunk_size, chunk_size);
220 }
221
222 static int create_space_info(struct btrfs_fs_info *info, u64 flags)
223 {
224
225         struct btrfs_space_info *space_info;
226         int i;
227         int ret;
228
229         space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
230         if (!space_info)
231                 return -ENOMEM;
232
233         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
234                 INIT_LIST_HEAD(&space_info->block_groups[i]);
235         init_rwsem(&space_info->groups_sem);
236         spin_lock_init(&space_info->lock);
237         space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
238         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
239         INIT_LIST_HEAD(&space_info->ro_bgs);
240         INIT_LIST_HEAD(&space_info->tickets);
241         INIT_LIST_HEAD(&space_info->priority_tickets);
242         space_info->clamp = 1;
243         btrfs_update_space_info_chunk_size(space_info, calc_chunk_size(info, flags));
244
245         if (btrfs_is_zoned(info))
246                 space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH;
247
248         ret = btrfs_sysfs_add_space_info_type(info, space_info);
249         if (ret)
250                 return ret;
251
252         list_add(&space_info->list, &info->space_info);
253         if (flags & BTRFS_BLOCK_GROUP_DATA)
254                 info->data_sinfo = space_info;
255
256         return ret;
257 }
258
259 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
260 {
261         struct btrfs_super_block *disk_super;
262         u64 features;
263         u64 flags;
264         int mixed = 0;
265         int ret;
266
267         disk_super = fs_info->super_copy;
268         if (!btrfs_super_root(disk_super))
269                 return -EINVAL;
270
271         features = btrfs_super_incompat_flags(disk_super);
272         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
273                 mixed = 1;
274
275         flags = BTRFS_BLOCK_GROUP_SYSTEM;
276         ret = create_space_info(fs_info, flags);
277         if (ret)
278                 goto out;
279
280         if (mixed) {
281                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
282                 ret = create_space_info(fs_info, flags);
283         } else {
284                 flags = BTRFS_BLOCK_GROUP_METADATA;
285                 ret = create_space_info(fs_info, flags);
286                 if (ret)
287                         goto out;
288
289                 flags = BTRFS_BLOCK_GROUP_DATA;
290                 ret = create_space_info(fs_info, flags);
291         }
292 out:
293         return ret;
294 }
295
296 void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
297                              u64 total_bytes, u64 bytes_used,
298                              u64 bytes_readonly, u64 bytes_zone_unusable,
299                              bool active, struct btrfs_space_info **space_info)
300 {
301         struct btrfs_space_info *found;
302         int factor;
303
304         factor = btrfs_bg_type_to_factor(flags);
305
306         found = btrfs_find_space_info(info, flags);
307         ASSERT(found);
308         spin_lock(&found->lock);
309         found->total_bytes += total_bytes;
310         if (active)
311                 found->active_total_bytes += total_bytes;
312         found->disk_total += total_bytes * factor;
313         found->bytes_used += bytes_used;
314         found->disk_used += bytes_used * factor;
315         found->bytes_readonly += bytes_readonly;
316         found->bytes_zone_unusable += bytes_zone_unusable;
317         if (total_bytes > 0)
318                 found->full = 0;
319         btrfs_try_granting_tickets(info, found);
320         spin_unlock(&found->lock);
321         *space_info = found;
322 }
323
324 struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
325                                                u64 flags)
326 {
327         struct list_head *head = &info->space_info;
328         struct btrfs_space_info *found;
329
330         flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
331
332         list_for_each_entry(found, head, list) {
333                 if (found->flags & flags)
334                         return found;
335         }
336         return NULL;
337 }
338
339 static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
340                           struct btrfs_space_info *space_info,
341                           enum btrfs_reserve_flush_enum flush)
342 {
343         u64 profile;
344         u64 avail;
345         int factor;
346
347         if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
348                 profile = btrfs_system_alloc_profile(fs_info);
349         else
350                 profile = btrfs_metadata_alloc_profile(fs_info);
351
352         avail = atomic64_read(&fs_info->free_chunk_space);
353
354         /*
355          * If we have dup, raid1 or raid10 then only half of the free
356          * space is actually usable.  For raid56, the space info used
357          * doesn't include the parity drive, so we don't have to
358          * change the math
359          */
360         factor = btrfs_bg_type_to_factor(profile);
361         avail = div_u64(avail, factor);
362
363         /*
364          * If we aren't flushing all things, let us overcommit up to
365          * 1/2 of the space. If we can flush, don't let us overcommit
366          * too much, let it overcommit up to 1/8 of the space.
367          */
368         if (flush == BTRFS_RESERVE_FLUSH_ALL)
369                 avail >>= 3;
370         else
371                 avail >>= 1;
372         return avail;
373 }
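/*
 * A rough worked example of the scaling above (illustrative numbers only):
 * with 10GiB of unallocated space and a RAID1 metadata profile the factor is
 * 2, so avail starts at 5GiB; a BTRFS_RESERVE_FLUSH_ALL caller is then allowed
 * to overcommit by up to 5GiB >> 3 = 640MiB, any other flush level by up to
 * 5GiB >> 1 = 2.5GiB.
 */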
374
375 static inline u64 writable_total_bytes(struct btrfs_fs_info *fs_info,
376                                        struct btrfs_space_info *space_info)
377 {
378         /*
379          * On regular filesystem, all total_bytes are always writable. On zoned
380          * filesystem, there may be a limitation imposed by max_active_zones.
381          * For metadata allocation, we cannot finish an existing active block
382          * group to avoid a deadlock. Thus, we need to consider only the active
383          * groups to be writable for metadata space.
384          */
385         if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
386                 return space_info->total_bytes;
387
388         return space_info->active_total_bytes;
389 }
390
391 int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
392                          struct btrfs_space_info *space_info, u64 bytes,
393                          enum btrfs_reserve_flush_enum flush)
394 {
395         u64 avail;
396         u64 used;
397
398         /* Don't overcommit when in mixed mode */
399         if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
400                 return 0;
401
402         used = btrfs_space_info_used(space_info, true);
403         if (btrfs_is_zoned(fs_info) && (space_info->flags & BTRFS_BLOCK_GROUP_METADATA))
404                 avail = 0;
405         else
406                 avail = calc_available_free_space(fs_info, space_info, flush);
407
408         if (used + bytes < writable_total_bytes(fs_info, space_info) + avail)
409                 return 1;
410         return 0;
411 }
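/*
 * For example (purely illustrative numbers): with 8GiB of writable
 * total_bytes, 7GiB already accounted across the bytes_ counters and 640MiB
 * of avail from calc_available_free_space(), a 512MiB metadata reservation
 * may overcommit (7GiB + 512MiB < 8GiB + 640MiB) while a 2GiB one may not.
 */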
412
413 static void remove_ticket(struct btrfs_space_info *space_info,
414                           struct reserve_ticket *ticket)
415 {
416         if (!list_empty(&ticket->list)) {
417                 list_del_init(&ticket->list);
418                 ASSERT(space_info->reclaim_size >= ticket->bytes);
419                 space_info->reclaim_size -= ticket->bytes;
420         }
421 }
422
423 /*
424  * This is for space we already have accounted in space_info->bytes_may_use, so
425  * basically when we're returning space from block_rsv's.
426  */
427 void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
428                                 struct btrfs_space_info *space_info)
429 {
430         struct list_head *head;
431         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;
432
433         lockdep_assert_held(&space_info->lock);
434
435         head = &space_info->priority_tickets;
436 again:
437         while (!list_empty(head)) {
438                 struct reserve_ticket *ticket;
439                 u64 used = btrfs_space_info_used(space_info, true);
440
441                 ticket = list_first_entry(head, struct reserve_ticket, list);
442
443                 /* Check and see if our ticket can be satisfied now. */
444                 if ((used + ticket->bytes <= writable_total_bytes(fs_info, space_info)) ||
445                     btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
446                                          flush)) {
447                         btrfs_space_info_update_bytes_may_use(fs_info,
448                                                               space_info,
449                                                               ticket->bytes);
450                         remove_ticket(space_info, ticket);
451                         ticket->bytes = 0;
452                         space_info->tickets_id++;
453                         wake_up(&ticket->wait);
454                 } else {
455                         break;
456                 }
457         }
458
459         if (head == &space_info->priority_tickets) {
460                 head = &space_info->tickets;
461                 flush = BTRFS_RESERVE_FLUSH_ALL;
462                 goto again;
463         }
464 }
465
466 #define DUMP_BLOCK_RSV(fs_info, rsv_name)                               \
467 do {                                                                    \
468         struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name;           \
469         spin_lock(&__rsv->lock);                                        \
470         btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu",      \
471                    __rsv->size, __rsv->reserved);                       \
472         spin_unlock(&__rsv->lock);                                      \
473 } while (0)
474
475 static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
476                                     struct btrfs_space_info *info)
477 {
478         lockdep_assert_held(&info->lock);
479
480         /* The free space could be negative in case of overcommit */
481         btrfs_info(fs_info, "space_info %llu has %lld free, is %sfull",
482                    info->flags,
483                    (s64)(info->total_bytes - btrfs_space_info_used(info, true)),
484                    info->full ? "" : "not ");
485         btrfs_info(fs_info,
486                 "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
487                 info->total_bytes, info->bytes_used, info->bytes_pinned,
488                 info->bytes_reserved, info->bytes_may_use,
489                 info->bytes_readonly, info->bytes_zone_unusable);
490
491         DUMP_BLOCK_RSV(fs_info, global_block_rsv);
492         DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
493         DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
494         DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
495         DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
496
497 }
498
499 void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
500                            struct btrfs_space_info *info, u64 bytes,
501                            int dump_block_groups)
502 {
503         struct btrfs_block_group *cache;
504         int index = 0;
505
506         spin_lock(&info->lock);
507         __btrfs_dump_space_info(fs_info, info);
508         spin_unlock(&info->lock);
509
510         if (!dump_block_groups)
511                 return;
512
513         down_read(&info->groups_sem);
514 again:
515         list_for_each_entry(cache, &info->block_groups[index], list) {
516                 spin_lock(&cache->lock);
517                 btrfs_info(fs_info,
518                         "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu zone_unusable %s",
519                         cache->start, cache->length, cache->used, cache->pinned,
520                         cache->reserved, cache->zone_unusable,
521                         cache->ro ? "[readonly]" : "");
522                 spin_unlock(&cache->lock);
523                 btrfs_dump_free_space(cache, bytes);
524         }
525         if (++index < BTRFS_NR_RAID_TYPES)
526                 goto again;
527         up_read(&info->groups_sem);
528 }
529
530 static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
531                                         u64 to_reclaim)
532 {
533         u64 bytes;
534         u64 nr;
535
536         bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
537         nr = div64_u64(to_reclaim, bytes);
538         if (!nr)
539                 nr = 1;
540         return nr;
541 }
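/*
 * As a rough illustration, assuming the default 16KiB nodesize (where
 * btrfs_calc_insert_metadata_size(fs_info, 1) works out to 16KiB *
 * BTRFS_MAX_LEVEL * 2 = 256KiB): a request to reclaim 1MiB maps to 4 items.
 */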
542
543 #define EXTENT_SIZE_PER_ITEM    SZ_256K
544
545 /*
546  * shrink metadata reservation for delalloc
547  */
548 static void shrink_delalloc(struct btrfs_fs_info *fs_info,
549                             struct btrfs_space_info *space_info,
550                             u64 to_reclaim, bool wait_ordered,
551                             bool for_preempt)
552 {
553         struct btrfs_trans_handle *trans;
554         u64 delalloc_bytes;
555         u64 ordered_bytes;
556         u64 items;
557         long time_left;
558         int loops;
559
560         delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
561         ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
562         if (delalloc_bytes == 0 && ordered_bytes == 0)
563                 return;
564
565         /* Calculate the number of pages we need to flush for space reservation */
566         if (to_reclaim == U64_MAX) {
567                 items = U64_MAX;
568         } else {
569                 /*
570                  * to_reclaim is set to however much metadata we need to
571                  * reclaim, but reclaiming that much data doesn't really track
572                  * exactly.  What we really want to do is reclaim full inode's
573                  * worth of reservations, however that's not available to us
574                  * here.  We will take a fraction of the delalloc bytes for our
575                  * flushing loops and hope for the best.  Delalloc will expand
576                  * the amount we write to cover an entire dirty extent, which
577                  * will reclaim the metadata reservation for that range.  If
578                  * it's not enough subsequent flush stages will be more
579                  * aggressive.
580                  */
581                 to_reclaim = max(to_reclaim, delalloc_bytes >> 3);
582                 items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2;
583         }
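        /*
         * For the non-U64_MAX branch above, illustrative numbers only
         * (16KiB nodesize assumed): with to_reclaim = 4MiB and 64MiB of
         * outstanding delalloc, to_reclaim becomes max(4MiB, 8MiB) = 8MiB and
         * items = 2 * (8MiB / 256KiB) = 64.
         */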
584
585         trans = current->journal_info;
586
587         /*
588          * If we are doing more ordered than delalloc we need to just wait on
589          * ordered extents, otherwise we'll waste time trying to flush delalloc
590          * that likely won't give us the space back we need.
591          */
592         if (ordered_bytes > delalloc_bytes && !for_preempt)
593                 wait_ordered = true;
594
595         loops = 0;
596         while ((delalloc_bytes || ordered_bytes) && loops < 3) {
597                 u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
598                 long nr_pages = min_t(u64, temp, LONG_MAX);
599                 int async_pages;
600
601                 btrfs_start_delalloc_roots(fs_info, nr_pages, true);
602
603                 /*
604                  * We need to make sure any outstanding async pages are now
605                  * processed before we continue.  This is because things like
606                  * sync_inode() try to be smart and skip writing if the inode is
607                  * marked clean.  We don't use filemap_flush() for flushing
608                  * because we want to control how many pages we write out at a
609                  * time, thus this is the only safe way to make sure we've
610                  * waited for outstanding compressed workers to have started
611                  * their jobs and thus have ordered extents set up properly.
612                  *
613                  * This exists because we do not want to wait for each
614                  * individual inode to finish its async work, we simply want to
615                  * start the IO on everybody, and then come back here and wait
616                  * for all of the async work to catch up.  Once we're done with
617                  * that we know we'll have ordered extents for everything and we
618                  * can decide if we wait for that or not.
619                  *
620                  * If we choose to replace this in the future, make absolutely
621                  * sure that the proper waiting is being done in the async case,
622                  * as there have been bugs in that area before.
623                  */
624                 async_pages = atomic_read(&fs_info->async_delalloc_pages);
625                 if (!async_pages)
626                         goto skip_async;
627
628                 /*
629                  * We don't want to wait forever, if we wrote fewer pages in this
630                  * loop than we have outstanding, only wait for that number of
631                  * pages, otherwise we can wait for all async pages to finish
632                  * before continuing.
633                  */
634                 if (async_pages > nr_pages)
635                         async_pages -= nr_pages;
636                 else
637                         async_pages = 0;
638                 wait_event(fs_info->async_submit_wait,
639                            atomic_read(&fs_info->async_delalloc_pages) <=
640                            async_pages);
641 skip_async:
642                 loops++;
643                 if (wait_ordered && !trans) {
644                         btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
645                 } else {
646                         time_left = schedule_timeout_killable(1);
647                         if (time_left)
648                                 break;
649                 }
650
651                 /*
652                  * If we are for preemption we just want a one-shot of delalloc
653                  * flushing so we can stop flushing if we decide we don't need
654                  * to anymore.
655                  */
656                 if (for_preempt)
657                         break;
658
659                 spin_lock(&space_info->lock);
660                 if (list_empty(&space_info->tickets) &&
661                     list_empty(&space_info->priority_tickets)) {
662                         spin_unlock(&space_info->lock);
663                         break;
664                 }
665                 spin_unlock(&space_info->lock);
666
667                 delalloc_bytes = percpu_counter_sum_positive(
668                                                 &fs_info->delalloc_bytes);
669                 ordered_bytes = percpu_counter_sum_positive(
670                                                 &fs_info->ordered_bytes);
671         }
672 }
673
674 /*
675  * Try to flush some data based on policy set by @state. This is only advisory
676  * and may fail for various reasons. The caller is supposed to examine the
677  * state of @space_info to detect the outcome.
678  */
679 static void flush_space(struct btrfs_fs_info *fs_info,
680                        struct btrfs_space_info *space_info, u64 num_bytes,
681                        enum btrfs_flush_state state, bool for_preempt)
682 {
683         struct btrfs_root *root = fs_info->tree_root;
684         struct btrfs_trans_handle *trans;
685         int nr;
686         int ret = 0;
687
688         switch (state) {
689         case FLUSH_DELAYED_ITEMS_NR:
690         case FLUSH_DELAYED_ITEMS:
691                 if (state == FLUSH_DELAYED_ITEMS_NR)
692                         nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
693                 else
694                         nr = -1;
695
696                 trans = btrfs_join_transaction(root);
697                 if (IS_ERR(trans)) {
698                         ret = PTR_ERR(trans);
699                         break;
700                 }
701                 ret = btrfs_run_delayed_items_nr(trans, nr);
702                 btrfs_end_transaction(trans);
703                 break;
704         case FLUSH_DELALLOC:
705         case FLUSH_DELALLOC_WAIT:
706         case FLUSH_DELALLOC_FULL:
707                 if (state == FLUSH_DELALLOC_FULL)
708                         num_bytes = U64_MAX;
709                 shrink_delalloc(fs_info, space_info, num_bytes,
710                                 state != FLUSH_DELALLOC, for_preempt);
711                 break;
712         case FLUSH_DELAYED_REFS_NR:
713         case FLUSH_DELAYED_REFS:
714                 trans = btrfs_join_transaction(root);
715                 if (IS_ERR(trans)) {
716                         ret = PTR_ERR(trans);
717                         break;
718                 }
719                 if (state == FLUSH_DELAYED_REFS_NR)
720                         nr = calc_reclaim_items_nr(fs_info, num_bytes);
721                 else
722                         nr = 0;
723                 btrfs_run_delayed_refs(trans, nr);
724                 btrfs_end_transaction(trans);
725                 break;
726         case ALLOC_CHUNK:
727         case ALLOC_CHUNK_FORCE:
728                 /*
729                  * For metadata space on a zoned filesystem, reaching here means we
730                  * don't have enough space left in active_total_bytes. Try to
731                  * activate a block group first, because we may have an inactive
732                  * block group already allocated.
733                  */
734                 ret = btrfs_zoned_activate_one_bg(fs_info, space_info, false);
735                 if (ret < 0)
736                         break;
737                 else if (ret == 1)
738                         break;
739
740                 trans = btrfs_join_transaction(root);
741                 if (IS_ERR(trans)) {
742                         ret = PTR_ERR(trans);
743                         break;
744                 }
745                 ret = btrfs_chunk_alloc(trans,
746                                 btrfs_get_alloc_profile(fs_info, space_info->flags),
747                                 (state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
748                                         CHUNK_ALLOC_FORCE);
749                 btrfs_end_transaction(trans);
750
751                 /*
752                  * For metadata space on a zoned filesystem, allocating a new chunk
753                  * is not enough. We still need to activate the block group.
754                  * Activate the newly allocated block group by (maybe) finishing
755                  * a block group.
756                  */
757                 if (ret == 1) {
758                         ret = btrfs_zoned_activate_one_bg(fs_info, space_info, true);
759                         /*
760                          * Revert to the original ret regardless of whether we could
761                          * finish one block group or not.
762                          */
763                         if (ret >= 0)
764                                 ret = 1;
765                 }
766
767                 if (ret > 0 || ret == -ENOSPC)
768                         ret = 0;
769                 break;
770         case RUN_DELAYED_IPUTS:
771                 /*
772                  * If we have pending delayed iputs then we could free up a
773                  * bunch of pinned space, so make sure we run the iputs before
774                  * we do our pinned bytes check below.
775                  */
776                 btrfs_run_delayed_iputs(fs_info);
777                 btrfs_wait_on_delayed_iputs(fs_info);
778                 break;
779         case COMMIT_TRANS:
780                 ASSERT(current->journal_info == NULL);
781                 trans = btrfs_join_transaction(root);
782                 if (IS_ERR(trans)) {
783                         ret = PTR_ERR(trans);
784                         break;
785                 }
786                 ret = btrfs_commit_transaction(trans);
787                 break;
788         default:
789                 ret = -ENOSPC;
790                 break;
791         }
792
793         trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
794                                 ret, for_preempt);
795         return;
796 }
797
798 static inline u64
799 btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
800                                  struct btrfs_space_info *space_info)
801 {
802         u64 used;
803         u64 avail;
804         u64 total;
805         u64 to_reclaim = space_info->reclaim_size;
806
807         lockdep_assert_held(&space_info->lock);
808
809         avail = calc_available_free_space(fs_info, space_info,
810                                           BTRFS_RESERVE_FLUSH_ALL);
811         used = btrfs_space_info_used(space_info, true);
812
813         /*
814          * We may be flushing because suddenly we have less space than we had
815          * before, and now we're well over-committed based on our current free
816          * space.  If that's the case add in our overage so we make sure to put
817          * appropriate pressure on the flushing state machine.
818          */
819         total = writable_total_bytes(fs_info, space_info);
820         if (total + avail < used)
821                 to_reclaim += used - (total + avail);
822
823         return to_reclaim;
824 }
825
826 static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
827                                     struct btrfs_space_info *space_info)
828 {
829         u64 global_rsv_size = fs_info->global_block_rsv.reserved;
830         u64 ordered, delalloc;
831         u64 total = writable_total_bytes(fs_info, space_info);
832         u64 thresh;
833         u64 used;
834
835         thresh = div_factor_fine(total, 90);
836
837         lockdep_assert_held(&space_info->lock);
838
839         /* If we're just plain full then async reclaim just slows us down. */
840         if ((space_info->bytes_used + space_info->bytes_reserved +
841              global_rsv_size) >= thresh)
842                 return false;
843
844         used = space_info->bytes_may_use + space_info->bytes_pinned;
845
846         /* The total flushable belongs to the global rsv, don't flush. */
847         if (global_rsv_size >= used)
848                 return false;
849
850         /*
851          * 128MiB is 1/4 of the maximum global rsv size.  If we have less than
852          * that devoted to other reservations then there's no sense in flushing,
853          * we don't have a lot of things that need flushing.
854          */
855         if (used - global_rsv_size <= SZ_128M)
856                 return false;
857
858         /*
859          * We have tickets queued, bail so we don't compete with the async
860          * flushers.
861          */
862         if (space_info->reclaim_size)
863                 return false;
864
865         /*
866          * If we have over half of the free space occupied by reservations or
867          * pinned then we want to start flushing.
868          *
869          * We do not do the traditional thing here, which is to say
870          *
871          *   if (used >= ((total_bytes + avail) / 2))
872          *     return 1;
873          *
874          * because this doesn't quite work how we want.  If we had more than 50%
875          * of the space_info used by bytes_used and we had 0 available we'd just
876          * constantly run the background flusher.  Instead we want it to kick in
877          * if our reclaimable space exceeds our clamped free space.
878          *
879          * Our clamping range is 2^1 -> 2^8.  Practically speaking that means
880          * the following:
881          *
882          * Amount of RAM        Minimum threshold       Maximum threshold
883          *
884          *        256GiB                     1GiB                  128GiB
885          *        128GiB                   512MiB                   64GiB
886          *         64GiB                   256MiB                   32GiB
887          *         32GiB                   128MiB                   16GiB
888          *         16GiB                    64MiB                    8GiB
889          *
890          * These are the range our thresholds will fall in, corresponding to how
891          * much delalloc we need for the background flusher to kick in.
892          */
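        /*
         * A sketch with illustrative numbers: if calc_available_free_space()
         * returns 4GiB, total - used adds another 60GiB and clamp is 3, then
         * the threshold computed below is 64GiB >> 3 = 8GiB of reclaimable
         * space before preemptive flushing kicks in.
         */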
893
894         thresh = calc_available_free_space(fs_info, space_info,
895                                            BTRFS_RESERVE_FLUSH_ALL);
896         used = space_info->bytes_used + space_info->bytes_reserved +
897                space_info->bytes_readonly + global_rsv_size;
898         if (used < total)
899                 thresh += total - used;
900         thresh >>= space_info->clamp;
901
902         used = space_info->bytes_pinned;
903
904         /*
905          * If we have more ordered bytes than delalloc bytes then we're either
906          * doing a lot of DIO, or we simply don't have a lot of delalloc waiting
907          * around.  Preemptive flushing is only useful in that it can free up
908          * space before tickets need to wait for things to finish.  In the case
909          * of ordered extents, preemptively waiting on ordered extents gets us
910          * nothing, if our reservations are tied up in ordered extents we'll
911          * simply have to slow down writers by forcing them to wait on ordered
912          * extents.
913          *
914          * In the case that ordered is larger than delalloc, only include the
915          * block reserves that we would actually be able to directly reclaim
916          * from.  In this case if we're heavy on metadata operations this will
917          * clearly be heavy enough to warrant preemptive flushing.  In the case
918          * of heavy DIO or ordered reservations, preemptive flushing will just
919          * waste time and cause us to slow down.
920          *
921          * We want to make sure we truly are maxed out on ordered however, so
922          * cut ordered in half, and if it's still higher than delalloc then we
923          * can keep flushing.  This is to avoid the case where we start
924          * flushing, and now delalloc == ordered and we stop preemptively
925          * flushing when we could still have several gigs of delalloc to flush.
926          */
927         ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
928         delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
929         if (ordered >= delalloc)
930                 used += fs_info->delayed_refs_rsv.reserved +
931                         fs_info->delayed_block_rsv.reserved;
932         else
933                 used += space_info->bytes_may_use - global_rsv_size;
934
935         return (used >= thresh && !btrfs_fs_closing(fs_info) &&
936                 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
937 }
938
939 static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
940                                   struct btrfs_space_info *space_info,
941                                   struct reserve_ticket *ticket)
942 {
943         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
944         u64 min_bytes;
945
946         if (!ticket->steal)
947                 return false;
948
949         if (global_rsv->space_info != space_info)
950                 return false;
951
952         spin_lock(&global_rsv->lock);
953         min_bytes = div_factor(global_rsv->size, 1);
954         if (global_rsv->reserved < min_bytes + ticket->bytes) {
955                 spin_unlock(&global_rsv->lock);
956                 return false;
957         }
958         global_rsv->reserved -= ticket->bytes;
959         remove_ticket(space_info, ticket);
960         ticket->bytes = 0;
961         wake_up(&ticket->wait);
962         space_info->tickets_id++;
963         if (global_rsv->reserved < global_rsv->size)
964                 global_rsv->full = 0;
965         spin_unlock(&global_rsv->lock);
966
967         return true;
968 }
969
970 /*
971  * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
972  * @fs_info - fs_info for this fs
973  * @space_info - the space info we were flushing
974  *
975  * We call this when we've exhausted our flushing ability and haven't made
976  * progress in satisfying tickets.  The reservation code handles tickets in
977  * order, so if there is a large ticket first and then smaller ones we could
978  * very well satisfy the smaller tickets.  This will attempt to wake up any
979  * tickets in the list to catch this case.
980  *
981  * This function returns true if it was able to make progress by clearing out
982  * other tickets, or if it stumbles across a ticket that was smaller than the
983  * first ticket.
984  */
985 static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
986                                    struct btrfs_space_info *space_info)
987 {
988         struct reserve_ticket *ticket;
989         u64 tickets_id = space_info->tickets_id;
990         const bool aborted = BTRFS_FS_ERROR(fs_info);
991
992         trace_btrfs_fail_all_tickets(fs_info, space_info);
993
994         if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
995                 btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
996                 __btrfs_dump_space_info(fs_info, space_info);
997         }
998
999         while (!list_empty(&space_info->tickets) &&
1000                tickets_id == space_info->tickets_id) {
1001                 ticket = list_first_entry(&space_info->tickets,
1002                                           struct reserve_ticket, list);
1003
1004                 if (!aborted && steal_from_global_rsv(fs_info, space_info, ticket))
1005                         return true;
1006
1007                 if (!aborted && btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1008                         btrfs_info(fs_info, "failing ticket with %llu bytes",
1009                                    ticket->bytes);
1010
1011                 remove_ticket(space_info, ticket);
1012                 if (aborted)
1013                         ticket->error = -EIO;
1014                 else
1015                         ticket->error = -ENOSPC;
1016                 wake_up(&ticket->wait);
1017
1018                 /*
1019                  * We're just throwing tickets away, so more flushing may not
1020                  * trip over btrfs_try_granting_tickets, so we need to call it
1021                  * here to see if we can make progress with the next ticket in
1022                  * the list.
1023                  */
1024                 if (!aborted)
1025                         btrfs_try_granting_tickets(fs_info, space_info);
1026         }
1027         return (tickets_id != space_info->tickets_id);
1028 }
1029
1030 /*
1031  * This is for normal flushers, we can wait all goddamned day if we want to.  We
1032  * will loop and continuously try to flush as long as we are making progress.
1033  * We count progress as clearing off tickets each time we have to loop.
1034  */
1035 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
1036 {
1037         struct btrfs_fs_info *fs_info;
1038         struct btrfs_space_info *space_info;
1039         u64 to_reclaim;
1040         enum btrfs_flush_state flush_state;
1041         int commit_cycles = 0;
1042         u64 last_tickets_id;
1043
1044         fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
1045         space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
1046
1047         spin_lock(&space_info->lock);
1048         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
1049         if (!to_reclaim) {
1050                 space_info->flush = 0;
1051                 spin_unlock(&space_info->lock);
1052                 return;
1053         }
1054         last_tickets_id = space_info->tickets_id;
1055         spin_unlock(&space_info->lock);
1056
1057         flush_state = FLUSH_DELAYED_ITEMS_NR;
1058         do {
1059                 flush_space(fs_info, space_info, to_reclaim, flush_state, false);
1060                 spin_lock(&space_info->lock);
1061                 if (list_empty(&space_info->tickets)) {
1062                         space_info->flush = 0;
1063                         spin_unlock(&space_info->lock);
1064                         return;
1065                 }
1066                 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
1067                                                               space_info);
1068                 if (last_tickets_id == space_info->tickets_id) {
1069                         flush_state++;
1070                 } else {
1071                         last_tickets_id = space_info->tickets_id;
1072                         flush_state = FLUSH_DELAYED_ITEMS_NR;
1073                         if (commit_cycles)
1074                                 commit_cycles--;
1075                 }
1076
1077                 /*
1078                  * We do not want to empty the system of delalloc unless we're
1079                  * under heavy pressure, so allow one trip through the flushing
1080                  * logic before we start doing a FLUSH_DELALLOC_FULL.
1081                  */
1082                 if (flush_state == FLUSH_DELALLOC_FULL && !commit_cycles)
1083                         flush_state++;
1084
1085                 /*
1086                  * We don't want to force a chunk allocation until we've tried
1087                  * pretty hard to reclaim space.  Think of the case where we
1088                  * freed up a bunch of space and so have a lot of pinned space
1089                  * to reclaim.  We would rather use that than possibly create an
1090                  * underutilized metadata chunk.  So if this is our first run
1091                  * through the flushing state machine skip ALLOC_CHUNK_FORCE and
1092                  * commit the transaction.  If nothing has changed the next go
1093                  * around then we can force a chunk allocation.
1094                  */
1095                 if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
1096                         flush_state++;
1097
1098                 if (flush_state > COMMIT_TRANS) {
1099                         commit_cycles++;
1100                         if (commit_cycles > 2) {
1101                                 if (maybe_fail_all_tickets(fs_info, space_info)) {
1102                                         flush_state = FLUSH_DELAYED_ITEMS_NR;
1103                                         commit_cycles--;
1104                                 } else {
1105                                         space_info->flush = 0;
1106                                 }
1107                         } else {
1108                                 flush_state = FLUSH_DELAYED_ITEMS_NR;
1109                         }
1110                 }
1111                 spin_unlock(&space_info->lock);
1112         } while (flush_state <= COMMIT_TRANS);
1113 }
1114
1115 /*
1116  * This handles pre-flushing of metadata space before we get to the point that
1117  * we need to start blocking threads on tickets.  The logic here is different
1118  * from the other flush paths because it doesn't rely on tickets to tell us how
1119  * much we need to flush, instead it attempts to keep us below the 80% full
1120  * watermark of space by flushing whichever reservation pool is currently the
1121  * largest.
1122  */
1123 static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
1124 {
1125         struct btrfs_fs_info *fs_info;
1126         struct btrfs_space_info *space_info;
1127         struct btrfs_block_rsv *delayed_block_rsv;
1128         struct btrfs_block_rsv *delayed_refs_rsv;
1129         struct btrfs_block_rsv *global_rsv;
1130         struct btrfs_block_rsv *trans_rsv;
1131         int loops = 0;
1132
1133         fs_info = container_of(work, struct btrfs_fs_info,
1134                                preempt_reclaim_work);
1135         space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
1136         delayed_block_rsv = &fs_info->delayed_block_rsv;
1137         delayed_refs_rsv = &fs_info->delayed_refs_rsv;
1138         global_rsv = &fs_info->global_block_rsv;
1139         trans_rsv = &fs_info->trans_block_rsv;
1140
1141         spin_lock(&space_info->lock);
1142         while (need_preemptive_reclaim(fs_info, space_info)) {
1143                 enum btrfs_flush_state flush;
1144                 u64 delalloc_size = 0;
1145                 u64 to_reclaim, block_rsv_size;
1146                 u64 global_rsv_size = global_rsv->reserved;
1147
1148                 loops++;
1149
1150                 /*
1151                  * We don't have a precise counter for the metadata being
1152                  * reserved for delalloc, so we'll approximate it by subtracting
1153                  * out the block rsv's space from the bytes_may_use.  If that
1154                  * amount is higher than the individual reserves, then we can
1155                  * assume it's tied up in delalloc reservations.
1156                  */
1157                 block_rsv_size = global_rsv_size +
1158                         delayed_block_rsv->reserved +
1159                         delayed_refs_rsv->reserved +
1160                         trans_rsv->reserved;
1161                 if (block_rsv_size < space_info->bytes_may_use)
1162                         delalloc_size = space_info->bytes_may_use - block_rsv_size;
1163
1164                 /*
1165                  * We don't want to include the global_rsv in our calculation,
1166                  * because that's space we can't touch.  Subtract it from the
1167                  * block_rsv_size for the next checks.
1168                  */
1169                 block_rsv_size -= global_rsv_size;
1170
1171                 /*
1172                  * We really want to avoid flushing delalloc too much, as it
1173                  * could result in poor allocation patterns, so only flush it if
1174                  * it's larger than the rest of the pools combined.
1175                  */
1176                 if (delalloc_size > block_rsv_size) {
1177                         to_reclaim = delalloc_size;
1178                         flush = FLUSH_DELALLOC;
1179                 } else if (space_info->bytes_pinned >
1180                            (delayed_block_rsv->reserved +
1181                             delayed_refs_rsv->reserved)) {
1182                         to_reclaim = space_info->bytes_pinned;
1183                         flush = COMMIT_TRANS;
1184                 } else if (delayed_block_rsv->reserved >
1185                            delayed_refs_rsv->reserved) {
1186                         to_reclaim = delayed_block_rsv->reserved;
1187                         flush = FLUSH_DELAYED_ITEMS_NR;
1188                 } else {
1189                         to_reclaim = delayed_refs_rsv->reserved;
1190                         flush = FLUSH_DELAYED_REFS_NR;
1191                 }
1192
1193                 spin_unlock(&space_info->lock);
1194
1195                 /*
1196                  * We don't want to reclaim everything, just a portion, so scale
1197                  * down the to_reclaim by 1/4.  If it takes us down to 0,
1198                  * reclaim 1 item's worth.
1199                  */
1200                 to_reclaim >>= 2;
1201                 if (!to_reclaim)
1202                         to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1);
1203                 flush_space(fs_info, space_info, to_reclaim, flush, true);
1204                 cond_resched();
1205                 spin_lock(&space_info->lock);
1206         }
1207
1208         /* We only went through once, back off our clamping. */
1209         if (loops == 1 && !space_info->reclaim_size)
1210                 space_info->clamp = max(1, space_info->clamp - 1);
1211         trace_btrfs_done_preemptive_reclaim(fs_info, space_info);
1212         spin_unlock(&space_info->lock);
1213 }
1214
1215 /*
1216  * FLUSH_DELALLOC_WAIT:
1217  *   Space is freed from flushing delalloc in one of two ways.
1218  *
1219  *   1) compression is on and we allocate less space than we reserved
1220  *   2) we are overwriting existing space
1221  *
1222  *   For #1 that extra space is reclaimed as soon as the delalloc pages are
1223  *   COWed, by way of btrfs_add_reserved_bytes() which adds the actual extent
1224  *   length to ->bytes_reserved, and subtracts the reserved space from
1225  *   ->bytes_may_use.
1226  *
1227  *   For #2 this is trickier.  Once the ordered extent runs we will drop the
1228  *   extent in the range we are overwriting, which creates a delayed ref for
1229  *   that freed extent.  This however is not reclaimed until the transaction
1230  *   commits, thus the next stages.
1231  *
1232  * RUN_DELAYED_IPUTS
1233  *   If we are freeing inodes, we want to make sure all delayed iputs have
1234  *   completed, because they could have been on an inode with i_nlink == 0, and
1235  *   thus have been truncated and freed up space.  But again this space is not
1236  *   immediately re-usable, it comes in the form of a delayed ref, which must be
1237  *   run and then the transaction must be committed.
1238  *
1239  * COMMIT_TRANS
1240  *   This is where we reclaim all of the pinned space generated by running the
1241  *   iputs
1242  *
1243  * ALLOC_CHUNK_FORCE
1244  *   For data we start with alloc chunk force, however we could have been full
1245  *   before, and then the transaction commit could have freed new block groups,
1246  *   so if we now have space to allocate do the force chunk allocation.
1247  */
1248 static const enum btrfs_flush_state data_flush_states[] = {
1249         FLUSH_DELALLOC_FULL,
1250         RUN_DELAYED_IPUTS,
1251         COMMIT_TRANS,
1252         ALLOC_CHUNK_FORCE,
1253 };
1254
1255 static void btrfs_async_reclaim_data_space(struct work_struct *work)
1256 {
1257         struct btrfs_fs_info *fs_info;
1258         struct btrfs_space_info *space_info;
1259         u64 last_tickets_id;
1260         enum btrfs_flush_state flush_state = 0;
1261
1262         fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
1263         space_info = fs_info->data_sinfo;
1264
1265         spin_lock(&space_info->lock);
1266         if (list_empty(&space_info->tickets)) {
1267                 space_info->flush = 0;
1268                 spin_unlock(&space_info->lock);
1269                 return;
1270         }
1271         last_tickets_id = space_info->tickets_id;
1272         spin_unlock(&space_info->lock);
1273
1274         while (!space_info->full) {
1275                 flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
1276                 spin_lock(&space_info->lock);
1277                 if (list_empty(&space_info->tickets)) {
1278                         space_info->flush = 0;
1279                         spin_unlock(&space_info->lock);
1280                         return;
1281                 }
1282
1283                 /* Something happened, fail everything and bail. */
1284                 if (BTRFS_FS_ERROR(fs_info))
1285                         goto aborted_fs;
1286                 last_tickets_id = space_info->tickets_id;
1287                 spin_unlock(&space_info->lock);
1288         }
1289
1290         while (flush_state < ARRAY_SIZE(data_flush_states)) {
1291                 flush_space(fs_info, space_info, U64_MAX,
1292                             data_flush_states[flush_state], false);
1293                 spin_lock(&space_info->lock);
1294                 if (list_empty(&space_info->tickets)) {
1295                         space_info->flush = 0;
1296                         spin_unlock(&space_info->lock);
1297                         return;
1298                 }
1299
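                /*
                 * If tickets_id has not advanced since we last sampled it, no
                 * ticket left the list during the last flush, so move on to
                 * the next flush state; otherwise start over from the first
                 * state.
                 */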
1300                 if (last_tickets_id == space_info->tickets_id) {
1301                         flush_state++;
1302                 } else {
1303                         last_tickets_id = space_info->tickets_id;
1304                         flush_state = 0;
1305                 }
1306
1307                 if (flush_state >= ARRAY_SIZE(data_flush_states)) {
1308                         if (space_info->full) {
1309                                 if (maybe_fail_all_tickets(fs_info, space_info))
1310                                         flush_state = 0;
1311                                 else
1312                                         space_info->flush = 0;
1313                         } else {
1314                                 flush_state = 0;
1315                         }
1316
1317                         /* Something happened, fail everything and bail. */
1318                         if (BTRFS_FS_ERROR(fs_info))
1319                                 goto aborted_fs;
1320
1321                 }
1322                 spin_unlock(&space_info->lock);
1323         }
1324         return;
1325
1326 aborted_fs:
1327         maybe_fail_all_tickets(fs_info, space_info);
1328         space_info->flush = 0;
1329         spin_unlock(&space_info->lock);
1330 }
1331
1332 void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
1333 {
1334         INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space);
1335         INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space);
1336         INIT_WORK(&fs_info->preempt_reclaim_work,
1337                   btrfs_preempt_reclaim_metadata_space);
1338 }
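/*
 * Rough usage note (editorial): these workers are not run synchronously by
 * reservers; they are kicked from __reserve_bytes() below via
 * queue_work(system_unbound_wq, ...), either when an enospc ticket has to be
 * queued (async_reclaim_work / async_data_reclaim_work) or when
 * need_preemptive_reclaim() decides background flushing should start
 * (preempt_reclaim_work).
 */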
1339
1340 static const enum btrfs_flush_state priority_flush_states[] = {
1341         FLUSH_DELAYED_ITEMS_NR,
1342         FLUSH_DELAYED_ITEMS,
1343         ALLOC_CHUNK,
1344 };
1345
1346 static const enum btrfs_flush_state evict_flush_states[] = {
1347         FLUSH_DELAYED_ITEMS_NR,
1348         FLUSH_DELAYED_ITEMS,
1349         FLUSH_DELAYED_REFS_NR,
1350         FLUSH_DELAYED_REFS,
1351         FLUSH_DELALLOC,
1352         FLUSH_DELALLOC_WAIT,
1353         FLUSH_DELALLOC_FULL,
1354         ALLOC_CHUNK,
1355         COMMIT_TRANS,
1356 };
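/*
 * Sketch of how the two lists above are consumed (see handle_reserve_ticket()
 * below): BTRFS_RESERVE_FLUSH_LIMIT walks priority_flush_states and
 * BTRFS_RESERVE_FLUSH_EVICT walks the heavier evict_flush_states, both via
 * priority_reclaim_metadata_space(), one state at a time, until the ticket is
 * satisfied or the states run out.
 */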
1357
1358 static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
1359                                 struct btrfs_space_info *space_info,
1360                                 struct reserve_ticket *ticket,
1361                                 const enum btrfs_flush_state *states,
1362                                 int states_nr)
1363 {
1364         u64 to_reclaim;
1365         int flush_state = 0;
1366
1367         spin_lock(&space_info->lock);
1368         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
1369         /*
1370          * This is the priority reclaim path, so to_reclaim could be >0 still
1371          * because we may have only satisfied the priority tickets and still
1372          * left non-priority tickets on the list.  We would then have
1373          * to_reclaim but ->bytes == 0.
1374          */
1375         if (ticket->bytes == 0) {
1376                 spin_unlock(&space_info->lock);
1377                 return;
1378         }
1379
1380         while (flush_state < states_nr) {
1381                 spin_unlock(&space_info->lock);
1382                 flush_space(fs_info, space_info, to_reclaim, states[flush_state],
1383                             false);
1384                 flush_state++;
1385                 spin_lock(&space_info->lock);
1386                 if (ticket->bytes == 0) {
1387                         spin_unlock(&space_info->lock);
1388                         return;
1389                 }
1390         }
1391
1392         /* Attempt to steal from the global rsv if we can. */
1393         if (!steal_from_global_rsv(fs_info, space_info, ticket)) {
1394                 ticket->error = -ENOSPC;
1395                 remove_ticket(space_info, ticket);
1396         }
1397
1398         /*
1399          * We must run try_granting_tickets here because we could be a large
1400          * ticket in front of a smaller ticket that can now be satisfied with
1401          * the available space.
1402          */
1403         btrfs_try_granting_tickets(fs_info, space_info);
1404         spin_unlock(&space_info->lock);
1405 }
1406
1407 static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
1408                                         struct btrfs_space_info *space_info,
1409                                         struct reserve_ticket *ticket)
1410 {
1411         spin_lock(&space_info->lock);
1412
1413         /* We could have been granted before we got here. */
1414         if (ticket->bytes == 0) {
1415                 spin_unlock(&space_info->lock);
1416                 return;
1417         }
1418
1419         while (!space_info->full) {
1420                 spin_unlock(&space_info->lock);
1421                 flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
1422                 spin_lock(&space_info->lock);
1423                 if (ticket->bytes == 0) {
1424                         spin_unlock(&space_info->lock);
1425                         return;
1426                 }
1427         }
1428
1429         ticket->error = -ENOSPC;
1430         remove_ticket(space_info, ticket);
1431         btrfs_try_granting_tickets(fs_info, space_info);
1432         spin_unlock(&space_info->lock);
1433 }
1434
1435 static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
1436                                 struct btrfs_space_info *space_info,
1437                                 struct reserve_ticket *ticket)
1438
1439 {
1440         DEFINE_WAIT(wait);
1441         int ret = 0;
1442
1443         spin_lock(&space_info->lock);
1444         while (ticket->bytes > 0 && ticket->error == 0) {
1445                 ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
1446                 if (ret) {
1447                         /*
1448                          * Delete us from the list. After we unlock the space
1449                          * info, we don't want the async reclaim job to reserve
1450                          * space for this ticket. If that would happen, then the
1451                          * ticket's task would not know that space was reserved
1452                          * despite getting an error, resulting in a space leak
1453                          * (bytes_may_use counter of our space_info).
1454                          */
1455                         remove_ticket(space_info, ticket);
1456                         ticket->error = -EINTR;
1457                         break;
1458                 }
1459                 spin_unlock(&space_info->lock);
1460
1461                 schedule();
1462
1463                 finish_wait(&ticket->wait, &wait);
1464                 spin_lock(&space_info->lock);
1465         }
1466         spin_unlock(&space_info->lock);
1467 }
1468
1469 /**
1470  * Do the appropriate flushing and waiting for a ticket
1471  *
1472  * @fs_info:    the filesystem
1473  * @space_info: space info for the reservation
1474  * @ticket:     ticket for the reservation
1475  * @start_ns:   timestamp when the reservation started
1476  * @orig_bytes: amount of bytes originally reserved
1477  * @flush:      how much we can flush
1478  *
1479  * This does the work of figuring out how to flush for the ticket, waiting for
1480  * the reservation, and returning the appropriate error if there is one.
1481  */
1482 static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
1483                                  struct btrfs_space_info *space_info,
1484                                  struct reserve_ticket *ticket,
1485                                  u64 start_ns, u64 orig_bytes,
1486                                  enum btrfs_reserve_flush_enum flush)
1487 {
1488         int ret;
1489
1490         switch (flush) {
1491         case BTRFS_RESERVE_FLUSH_DATA:
1492         case BTRFS_RESERVE_FLUSH_ALL:
1493         case BTRFS_RESERVE_FLUSH_ALL_STEAL:
1494                 wait_reserve_ticket(fs_info, space_info, ticket);
1495                 break;
1496         case BTRFS_RESERVE_FLUSH_LIMIT:
1497                 priority_reclaim_metadata_space(fs_info, space_info, ticket,
1498                                                 priority_flush_states,
1499                                                 ARRAY_SIZE(priority_flush_states));
1500                 break;
1501         case BTRFS_RESERVE_FLUSH_EVICT:
1502                 priority_reclaim_metadata_space(fs_info, space_info, ticket,
1503                                                 evict_flush_states,
1504                                                 ARRAY_SIZE(evict_flush_states));
1505                 break;
1506         case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE:
1507                 priority_reclaim_data_space(fs_info, space_info, ticket);
1508                 break;
1509         default:
1510                 ASSERT(0);
1511                 break;
1512         }
1513
1514         ret = ticket->error;
1515         ASSERT(list_empty(&ticket->list));
1516         /*
1517          * Check that we can't have an error set if the reservation succeeded,
1518          * as that would confuse tasks and lead them to error out without
1519          * releasing reserved space (if an error happens the expectation is that
1520          * space wasn't reserved at all).
1521          */
1522         ASSERT(!(ticket->bytes == 0 && ticket->error));
1523         trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes,
1524                                    start_ns, flush, ticket->error);
1525         return ret;
1526 }
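/*
 * Summary (editorial, derived from the switch above): DATA, ALL and ALL_STEAL
 * tickets simply sleep and rely on the async reclaim workers; LIMIT and EVICT
 * tickets flush synchronously on their own with progressively heavier state
 * lists; FREE_SPACE_INODE tickets only ever force chunk allocation.
 */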
1527
1528 /*
1529  * This returns true if this flush state will go through the ordinary flushing
1530  * code.
1531  */
1532 static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
1533 {
1534         return  (flush == BTRFS_RESERVE_FLUSH_ALL) ||
1535                 (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
1536 }
1537
1538 static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info,
1539                                        struct btrfs_space_info *space_info)
1540 {
1541         u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes);
1542         u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
1543
1544         /*
1545          * If we're heavy on ordered operations then clamping won't help us.  We
1546          * need to clamp specifically to keep up with dirtying buffered
1547          * writers, because there's not a 1:1 correlation of writing delalloc
1548          * and freeing space, like there is with flushing delayed refs or
1549          * delayed nodes.  If we're already more ordered than delalloc then
1550          * we're keeping up, otherwise we aren't and should probably clamp.
1551          */
1552         if (ordered < delalloc)
1553                 space_info->clamp = min(space_info->clamp + 1, 8);
1554 }
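/*
 * Worked example of the clamp (using only the bounds visible in this file):
 * the two call sites here keep space_info->clamp within [1, 8].  It is bumped
 * by one when __reserve_bytes() has to add a ticket and kick the async
 * flusher, provided delalloc is outrunning ordered extents; it is backed off
 * by one when a preemptive reclaim pass needed only a single loop and has no
 * reclaim_size outstanding (see the end of
 * btrfs_preempt_reclaim_metadata_space() above).  A larger clamp means a
 * tighter threshold for kicking off preemptive flushing.
 */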
1555
1556 static inline bool can_steal(enum btrfs_reserve_flush_enum flush)
1557 {
1558         return (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
1559                 flush == BTRFS_RESERVE_FLUSH_EVICT);
1560 }
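/*
 * Note (editorial): the result of can_steal() is stashed in ticket->steal by
 * __reserve_bytes() below, so only FLUSH_ALL_STEAL and FLUSH_EVICT
 * reservations may dip into the global block rsv as a last resort (see
 * steal_from_global_rsv() earlier in this file).
 */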
1561
1562 /**
1563  * Try to reserve bytes from the given space_info
1564  *
1565  * @fs_info:    the filesystem
1566  * @space_info: space info we want to allocate from
1567  * @orig_bytes: number of bytes we want
1568  * @flush:      whether or not we can flush to make our reservation
1569  *
1570  * This will reserve orig_bytes number of bytes from the given space_info.  If
1571  * there is not enough space it will make an attempt to flush out space to make
1572  * room.  It will do this by flushing delalloc if possible or committing the
1573  * transaction.  If @flush is BTRFS_RESERVE_NO_FLUSH then no attempts to regain
1574  * reservations will be made and this will fail if there is not enough space
1575  * already.
1576  */
1577 static int __reserve_bytes(struct btrfs_fs_info *fs_info,
1578                            struct btrfs_space_info *space_info, u64 orig_bytes,
1579                            enum btrfs_reserve_flush_enum flush)
1580 {
1581         struct work_struct *async_work;
1582         struct reserve_ticket ticket;
1583         u64 start_ns = 0;
1584         u64 used;
1585         int ret = 0;
1586         bool pending_tickets;
1587
1588         ASSERT(orig_bytes);
1589         ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);
1590
1591         if (flush == BTRFS_RESERVE_FLUSH_DATA)
1592                 async_work = &fs_info->async_data_reclaim_work;
1593         else
1594                 async_work = &fs_info->async_reclaim_work;
1595
1596         spin_lock(&space_info->lock);
1597         ret = -ENOSPC;
1598         used = btrfs_space_info_used(space_info, true);
1599
1600         /*
1601          * We don't want NO_FLUSH allocations to jump everybody; they can
1602          * generally handle ENOSPC in a different way, so treat them the same as
1603          * normal flushers when it comes to skipping pending tickets.
1604          */
1605         if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH))
1606                 pending_tickets = !list_empty(&space_info->tickets) ||
1607                         !list_empty(&space_info->priority_tickets);
1608         else
1609                 pending_tickets = !list_empty(&space_info->priority_tickets);
1610
1611         /*
1612          * Carry on if we have enough space (short-circuit) OR call
1613          * can_overcommit() to ensure we can overcommit to continue.
1614          */
1615         if (!pending_tickets &&
1616             ((used + orig_bytes <= writable_total_bytes(fs_info, space_info)) ||
1617              btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
1618                 btrfs_space_info_update_bytes_may_use(fs_info, space_info,
1619                                                       orig_bytes);
1620                 ret = 0;
1621         }
1622
1623         /*
1624          * If we couldn't make a reservation then setup our reservation ticket
1625          * and kick the async worker if it's not already running.
1626          *
1627          * If we are a priority flusher then we just need to add our ticket to
1628          * the list and we will do our own flushing further down.
1629          */
1630         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
1631                 ticket.bytes = orig_bytes;
1632                 ticket.error = 0;
1633                 space_info->reclaim_size += ticket.bytes;
1634                 init_waitqueue_head(&ticket.wait);
1635                 ticket.steal = can_steal(flush);
1636                 if (trace_btrfs_reserve_ticket_enabled())
1637                         start_ns = ktime_get_ns();
1638
1639                 if (flush == BTRFS_RESERVE_FLUSH_ALL ||
1640                     flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
1641                     flush == BTRFS_RESERVE_FLUSH_DATA) {
1642                         list_add_tail(&ticket.list, &space_info->tickets);
1643                         if (!space_info->flush) {
1644                                 /*
1645                                  * We were forced to add a reserve ticket, so
1646                                  * our preemptive flushing is unable to keep
1647                                  * up.  Clamp down on the threshold for the
1648                                  * preemptive flushing in order to keep up with
1649                                  * the workload.
1650                                  */
1651                                 maybe_clamp_preempt(fs_info, space_info);
1652
1653                                 space_info->flush = 1;
1654                                 trace_btrfs_trigger_flush(fs_info,
1655                                                           space_info->flags,
1656                                                           orig_bytes, flush,
1657                                                           "enospc");
1658                                 queue_work(system_unbound_wq, async_work);
1659                         }
1660                 } else {
1661                         list_add_tail(&ticket.list,
1662                                       &space_info->priority_tickets);
1663                 }
1664         } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
1665                 used += orig_bytes;
1666                 /*
1667                  * We will do the space reservation dance during log replay,
1668                  * which means we won't have fs_info->fs_root set, so don't do
1669                  * the async reclaim as we will panic.
1670                  */
1671                 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
1672                     !work_busy(&fs_info->preempt_reclaim_work) &&
1673                     need_preemptive_reclaim(fs_info, space_info)) {
1674                         trace_btrfs_trigger_flush(fs_info, space_info->flags,
1675                                                   orig_bytes, flush, "preempt");
1676                         queue_work(system_unbound_wq,
1677                                    &fs_info->preempt_reclaim_work);
1678                 }
1679         }
1680         spin_unlock(&space_info->lock);
1681         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
1682                 return ret;
1683
1684         return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns,
1685                                      orig_bytes, flush);
1686 }
1687
1688 /**
1689  * Try to reserve metadata bytes from the block_rsv's space
1690  *
1691  * @fs_info:    the filesystem
1692  * @block_rsv:  block_rsv we're allocating for
1693  * @orig_bytes: number of bytes we want
1694  * @flush:      whether or not we can flush to make our reservation
1695  *
1696  * This will reserve orig_bytes number of bytes from the space info associated
1697  * with the block_rsv.  If there is not enough space it will make an attempt to
1698  * flush out space to make room.  It will do this by flushing delalloc if
1699  * possible or committing the transaction.  If @flush is BTRFS_RESERVE_NO_FLUSH
1700  * then no attempts to regain reservations will be made and this will fail if
1701  * there is not enough space already.
1702  */
1703 int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
1704                                  struct btrfs_block_rsv *block_rsv,
1705                                  u64 orig_bytes,
1706                                  enum btrfs_reserve_flush_enum flush)
1707 {
1708         int ret;
1709
1710         ret = __reserve_bytes(fs_info, block_rsv->space_info, orig_bytes, flush);
1711         if (ret == -ENOSPC) {
1712                 trace_btrfs_space_reservation(fs_info, "space_info:enospc",
1713                                               block_rsv->space_info->flags,
1714                                               orig_bytes, 1);
1715
1716                 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1717                         btrfs_dump_space_info(fs_info, block_rsv->space_info,
1718                                               orig_bytes, 0);
1719         }
1720         return ret;
1721 }
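/*
 * Minimal caller sketch (editorial; "rsv" is a hypothetical, already
 * initialized block reserve, and real callers normally go through the
 * block-rsv helpers rather than calling this directly):
 *
 *	ret = btrfs_reserve_metadata_bytes(fs_info, rsv, num_bytes,
 *					   BTRFS_RESERVE_FLUSH_ALL);
 *	if (ret)
 *		return ret;
 *
 * On success num_bytes has already been added to the space_info's
 * bytes_may_use via btrfs_space_info_update_bytes_may_use() in
 * __reserve_bytes(); on -ENOSPC nothing has been accounted.
 */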
1722
1723 /**
1724  * Try to reserve data bytes for an allocation
1725  *
1726  * @fs_info: the filesystem
1727  * @bytes:   number of bytes we need
1728  * @flush:   how we are allowed to flush
1729  *
1730  * This will reserve bytes from the data space info.  If there is not enough
1731  * space then we will attempt to flush space as specified by flush.
1732  */
1733 int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
1734                              enum btrfs_reserve_flush_enum flush)
1735 {
1736         struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
1737         int ret;
1738
1739         ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA ||
1740                flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE);
1741         ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA);
1742
1743         ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush);
1744         if (ret == -ENOSPC) {
1745                 trace_btrfs_space_reservation(fs_info, "space_info:enospc",
1746                                               data_sinfo->flags, bytes, 1);
1747                 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1748                         btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0);
1749         }
1750         return ret;
1751 }
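/*
 * Minimal caller sketch (editorial; error handling and the eventual release of
 * the reservation are the caller's responsibility and are not shown):
 *
 *	ret = btrfs_reserve_data_bytes(fs_info, SZ_1M, BTRFS_RESERVE_FLUSH_DATA);
 *	if (ret)
 *		return ret;
 *
 * With BTRFS_RESERVE_FLUSH_DATA this may sleep while the async data reclaim
 * worker walks data_flush_states[]; with BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE
 * it only forces chunk allocation via priority_reclaim_data_space() and never
 * waits on the async worker.
 */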