1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
4  * Written by Alex Tomas <alex@clusterfs.com>
5  */
6
7
8 /*
9  * mballoc.c contains the multiblocks allocation routines
10  */
11
12 #include "ext4_jbd2.h"
13 #include "mballoc.h"
14 #include <linux/log2.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/nospec.h>
18 #include <linux/backing-dev.h>
19 #include <linux/freezer.h>
20 #include <trace/events/ext4.h>
21
22 /*
23  * MUSTDO:
24  *   - test ext4_ext_search_left() and ext4_ext_search_right()
25  *   - search for metadata in few groups
26  *
27  * TODO v4:
28  *   - normalization should take into account whether file is still open
29  *   - discard preallocations if no free space left (policy?)
30  *   - don't normalize tails
31  *   - quota
32  *   - reservation for superuser
33  *
34  * TODO v3:
35  *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
36  *   - track min/max extents in each group for better group selection
37  *   - mb_mark_used() may allocate chunk right after splitting buddy
38  *   - tree of groups sorted by number of free blocks
39  *   - error handling
40  */
41
42 /*
43  * An allocation request involves a request for multiple blocks near
44  * the specified goal block.
45  *
46  * During the initialization phase of the allocator, we decide whether to
47  * use group preallocation or inode preallocation depending on the size of
48  * the file. The size of the file could be the resulting file size we
49  * would have after allocation, or the current file size, whichever
50  * is larger. If the size is less than sbi->s_mb_stream_request we
51  * select group preallocation. The default value of
52  * s_mb_stream_request is 16 blocks. This can also be tuned via
53  * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
54  * terms of number of blocks.
55  *
56  * The main motivation for having small files use group preallocation is to
57  * ensure that small files are kept close together on the disk.
58  *
59  * In the first stage, the allocator looks at the inode prealloc list,
60  * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
61  * spaces for this particular inode. The inode prealloc space is
62  * represented as:
63  *
64  * pa_lstart -> the logical start block for this prealloc space
65  * pa_pstart -> the physical start block for this prealloc space
66  * pa_len    -> length for this prealloc space (in clusters)
67  * pa_free   ->  free space available in this prealloc space (in clusters)
68  *
69  * The inode preallocation space is used based on the _logical_ start
70  * block. Only if the logical file block falls within the range of a prealloc
71  * space do we consume that particular prealloc space. This makes sure that
72  * we have contiguous physical blocks representing the file blocks.
73  *
74  * The important thing to note about inode prealloc space is that
75  * we don't modify any of its associated values except
76  * pa_free.
77  *
78  * If we are not able to find blocks in the inode prealloc space and if we
79  * have the group allocation flag set then we look at the locality group
80  * prealloc space. This is a per-CPU prealloc list, represented as
81  *
82  * ext4_sb_info.s_locality_groups[smp_processor_id()]
83  *
84  * The reason for having a per-CPU locality group is to reduce the contention
85  * between CPUs. It is possible to get scheduled at this point.
86  *
87  * The locality group prealloc space is used based on whether we have
88  * enough free space (pa_free) within the prealloc space.
89  *
90  * If we can't allocate blocks via inode prealloc and/or locality group
91  * prealloc then we look at the buddy cache. The buddy cache is represented
92  * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
93  * mapped to the buddy and bitmap information regarding different
94  * groups. The buddy information is attached to the buddy cache inode so
95  * that we can access it through the page cache. The information regarding
96  * each group is loaded via ext4_mb_load_buddy and consists of the
97  * block bitmap and the buddy information, which are stored in the
98  * inode as:
99  *
100  *  {                        page                        }
101  *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
102  *
103  *
104  * one block each for bitmap and buddy information.  So for each group we
105  * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
106  * blocksize) blocks.  So it can hold information for groups_per_page
107  * groups, which is blocks_per_page/2
108  *
109  * The buddy cache inode is not stored on disk. The inode is thrown
110  * away when the filesystem is unmounted.
111  *
112  * We look for the requested number of blocks in the buddy cache. If we
113  * locate that many free blocks, we return with additional information
114  * regarding the rest of the contiguous physical blocks available.
115  *
116  * Before allocating blocks via the buddy cache we normalize the request
117  * blocks. This ensures we ask for more blocks than we need. The extra
118  * blocks that we get after allocation are added to the respective prealloc
119  * list. In case of inode preallocation we follow a list of heuristics
120  * based on file size. This can be found in ext4_mb_normalize_request. If
121  * we are doing a group prealloc we try to normalize the request to
122  * sbi->s_mb_group_prealloc.  The default value of s_mb_group_prealloc is
123  * dependent on the cluster size; for non-bigalloc file systems, it is
124  * 512 blocks. This can be tuned via
125  * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
126  * terms of number of blocks. If we have mounted the file system with the -o
127  * stripe=<value> option, the group prealloc request is normalized to the
128  * smallest multiple of the stripe value (sbi->s_stripe) which is
129  * greater than the default mb_group_prealloc.
130  *
131  * If the "mb_optimize_scan" mount option is set, we maintain in-memory group info
132  * structures in two data structures:
133  *
134  * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
135  *
136  *    Locking: sbi->s_mb_largest_free_orders_locks (array of rw locks)
137  *
138  *    This is an array of lists where the index in the array represents the
139  *    largest free order in the buddy bitmap of the participating group infos of
140  *    that list. So, there are exactly MB_NUM_ORDERS(sb) lists (the total
141  *    number of possible buddy bitmap orders). Group infos are
142  *    placed in the appropriate lists.
143  *
144  * 2) Average fragment size lists (sbi->s_mb_avg_fragment_size)
145  *
146  *    Locking: sbi->s_mb_avg_fragment_size_locks (array of rw locks)
147  *
148  *    This is an array of lists where in the i-th list there are groups with
149  *    average fragment size >= 2^i and < 2^(i+1). The average fragment size
150  *    is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments.
151  *    Note that we don't bother with a special list for completely empty groups
152  *    so we only have MB_NUM_ORDERS(sb) lists.
153  *
154  * When the "mb_optimize_scan" mount option is set, mballoc consults the above data
155  * structures to decide the order in which groups are to be traversed for
156  * fulfilling an allocation request.
157  *
158  * At CR = 0, we look for groups which have the largest_free_order >= the order
159  * of the request. We directly look at the largest free order list in the data
160  * structure (1) above where largest_free_order = order of the request. If that
161  * list is empty, we look at the remaining lists in increasing order of
162  * largest_free_order. This allows us to perform CR = 0 lookup in O(1) time.
163  *
164  * At CR = 1, we only consider groups where average fragment size > request
165  * size. So, we look up a group which has average fragment size just above or
166  * equal to request size using our average fragment size group lists (data
167  * structure 2) in O(1) time.
168  *
169  * If the "mb_optimize_scan" mount option is not set, mballoc traverses groups in
170  * linear order which requires O(N) search time for each CR 0 and CR 1 phase.
171  *
172  * The regular allocator (using the buddy cache) supports a few tunables.
173  *
174  * /sys/fs/ext4/<partition>/mb_min_to_scan
175  * /sys/fs/ext4/<partition>/mb_max_to_scan
176  * /sys/fs/ext4/<partition>/mb_order2_req
177  * /sys/fs/ext4/<partition>/mb_linear_limit
178  *
179  * The regular allocator uses buddy scan only if the request len is a power
180  * of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
181  * value of s_mb_order2_reqs can be tuned via
182  * /sys/fs/ext4/<partition>/mb_order2_req.  If the request len is equal to
183  * the stripe size (sbi->s_stripe), we try to search for contiguous blocks in
184  * stripe-size units. This should result in better allocation on RAID setups.
185  * If not, we search in the specific group using the bitmap for best extents.
186  * The tunables min_to_scan and max_to_scan control the behaviour here.
187  * min_to_scan indicates how long mballoc __must__ look for a best
188  * extent and max_to_scan indicates how long mballoc __can__ look for a
189  * best extent among the found extents. Searching for the blocks starts with
190  * the group specified as the goal value in the allocation context via
191  * ac_g_ex. Each group is first checked to determine whether it
192  * can be used for allocation. ext4_mb_good_group explains how the groups are
193  * checked.
194  *
195  * When "mb_optimize_scan" is turned on, as mentioned above, the groups may not
196  * get traversed linearly. That may result in subsequent allocations not being
197  * close to each other. And so, the underlying device may get filled up in a
198  * non-linear fashion. While that may not matter on non-rotational devices, for
199  * rotational devices that may result in higher seek times. "mb_linear_limit"
200  * tells mballoc how many groups it should search linearly before
201  * consulting the above data structures for more efficient lookups. For
202  * non-rotational devices, this value defaults to 0 and for rotational devices
203  * this is set to MB_DEFAULT_LINEAR_LIMIT.
204  *
205  * Both preallocation spaces are populated as described above. So the first
206  * request will hit the buddy cache, which results in the prealloc
207  * space getting filled. The prealloc space is then used for
208  * subsequent requests.
209  */
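/*
 * A rough illustration of the size-based policy described above (a
 * sketch only; the real decision is made later in this file, in
 * ext4_mb_group_or_file(), and the helper name here is hypothetical):
 *
 *	static inline bool mb_would_use_group_pa(struct ext4_sb_info *sbi,
 *						 loff_t size_in_blocks)
 *	{
 *		// files smaller than s_mb_stream_request blocks (16 by
 *		// default) use the per-CPU locality group preallocation
 *		return size_in_blocks < sbi->s_mb_stream_request;
 *	}
 */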
210
211 /*
212  * mballoc operates on the following data:
213  *  - on-disk bitmap
214  *  - in-core buddy (actually includes buddy and bitmap)
215  *  - preallocation descriptors (PAs)
216  *
217  * there are two types of preallocations:
218  *  - inode
219  *    assigned to a specific inode and can be used for this inode only.
220  *    it describes part of the inode's space preallocated to specific
221  *    physical blocks. any block from that preallocation can be used
222  *    independently. the descriptor just tracks the number of blocks left
223  *    unused. so, before taking some block from the descriptor, one must
224  *    make sure the corresponding logical block isn't allocated yet. this
225  *    also means that freeing any block within the descriptor's range
226  *    must discard all preallocated blocks.
227  *  - locality group
228  *    assigned to a specific locality group, which does not translate to a
229  *    permanent set of inodes: an inode can join and leave the group. space
230  *    from this type of preallocation can be used for any inode. thus
231  *    it's consumed from the beginning to the end.
232  *
233  * relation between them can be expressed as:
234  *    in-core buddy = on-disk bitmap + preallocation descriptors
235  *
236  * this means the blocks mballoc considers used are:
237  *  - allocated blocks (persistent)
238  *  - preallocated blocks (non-persistent)
239  *
240  * consistency in mballoc world means that at any time a block is either
241  * free or used in ALL structures. notice: "any time" should not be read
242  * literally -- time is discrete and delimited by locks.
243  *
244  *  to keep it simple, we don't use block numbers, instead we count number of
245  *  blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
246  *
247  * all operations can be expressed as:
248  *  - init buddy:                       buddy = on-disk + PAs
249  *  - new PA:                           buddy += N; PA = N
250  *  - use inode PA:                     on-disk += N; PA -= N
251  *  - discard inode PA:                 buddy -= on-disk - PA; PA = 0
252  *  - use locality group PA:            on-disk += N; PA -= N
253  *  - discard locality group PA:        buddy -= PA; PA = 0
254  *  note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
255  *        is used in real operation because we can't know actual used
256  *        bits from PA, only from on-disk bitmap
257  *
258  * if we follow this strict logic, then all operations above should be atomic.
259  * given some of them can block, we'd have to use something like semaphores
260  * killing performance on high-end SMP hardware. let's try to relax it using
261  * the following knowledge:
262  *  1) if buddy is referenced, it's already initialized
263  *  2) while block is used in buddy and the buddy is referenced,
264  *     nobody can re-allocate that block
265  *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
266  *     bit set and PA claims same block, it's OK. IOW, one can set bit in
267  * on-disk bitmap if the buddy has the same bit set and/or a PA covers the
268  * corresponding block
269  *
270  * so, now we're building a concurrency table:
271  *  - init buddy vs.
272  *    - new PA
273  *      blocks for PA are allocated in the buddy, buddy must be referenced
274  *      until PA is linked to allocation group to avoid concurrent buddy init
275  *    - use inode PA
276  *      we need to make sure that either on-disk bitmap or PA has uptodate data
277  *      given (3) we care that PA-=N operation doesn't interfere with init
278  *    - discard inode PA
279  *      the simplest way would be to have buddy initialized by the discard
280  *    - use locality group PA
281  *      again PA-=N must be serialized with init
282  *    - discard locality group PA
283  *      the simplest way would be to have buddy initialized by the discard
284  *  - new PA vs.
285  *    - use inode PA
286  *      i_data_sem serializes them
287  *    - discard inode PA
288  *      discard process must wait until PA isn't used by another process
289  *    - use locality group PA
290  *      some mutex should serialize them
291  *    - discard locality group PA
292  *      discard process must wait until PA isn't used by another process
293  *  - use inode PA
294  *    - use inode PA
295  *      i_data_sem or another mutex should serialize them
296  *    - discard inode PA
297  *      discard process must wait until PA isn't used by another process
298  *    - use locality group PA
299  *      nothing wrong here -- they're different PAs covering different blocks
300  *    - discard locality group PA
301  *      discard process must wait until PA isn't used by another process
302  *
303  * now we're ready to draw a few conclusions:
304  *  - while a PA is referenced, no discard is possible
305  *  - a PA is kept referenced until its blocks are marked in the on-disk bitmap
306  *  - a PA changes only after the on-disk bitmap does
307  *  - discard must not compete with init. either init is done before
308  *    any discard or they're serialized somehow
309  *  - buddy init as sum of on-disk bitmap and PAs is done atomically
310  *
311  * a special case is when we've used a PA down to empty. no need to modify the
312  * buddy in this case, but we should care about concurrent init
313  *
314  */
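/*
 * Worked example of the accounting identities above: suppose a group's
 * on-disk bitmap has 100 blocks marked used, and the group carries one
 * inode PA created with N = 10 of which 3 blocks have been consumed
 * (so those 3 are among the 100 on-disk and PA = 7 remains). The
 * in-core buddy then shows 100 + 7 = 107 blocks used; discarding the
 * PA does buddy -= 10 - 3 = 7, bringing the buddy back in line with
 * the on-disk bitmap.
 */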
315
316 /*
317  * Logic in a few words:
318  *
319  *  - allocation:
320  *    load group
321  *    find blocks
322  *    mark bits in on-disk bitmap
323  *    release group
324  *
325  *  - use preallocation:
326  *    find proper PA (per-inode or group)
327  *    load group
328  *    mark bits in on-disk bitmap
329  *    release group
330  *    release PA
331  *
332  *  - free:
333  *    load group
334  *    mark bits in on-disk bitmap
335  *    release group
336  *
337  *  - discard preallocations in group:
338  *    mark PAs deleted
339  *    move them onto local list
340  *    load on-disk bitmap
341  *    load group
342  *    remove PA from object (inode or locality group)
343  *    mark free blocks in-core
344  *
345  *  - discard inode's preallocations:
346  */
347
348 /*
349  * Locking rules
350  *
351  * Locks:
352  *  - bitlock on a group        (group)
353  *  - object (inode/locality)   (object)
354  *  - per-pa lock               (pa)
355  *  - cr0 lists lock            (cr0)
356  *  - cr1 tree lock             (cr1)
357  *
358  * Paths:
359  *  - new pa
360  *    object
361  *    group
362  *
363  *  - find and use pa:
364  *    pa
365  *
366  *  - release consumed pa:
367  *    pa
368  *    group
369  *    object
370  *
371  *  - generate in-core bitmap:
372  *    group
373  *        pa
374  *
375  *  - discard all for given object (inode, locality group):
376  *    object
377  *        pa
378  *    group
379  *
380  *  - discard all for given group:
381  *    group
382  *        pa
383  *    group
384  *        object
385  *
386  *  - allocation path (ext4_mb_regular_allocator)
387  *    group
388  *    cr0/cr1
389  */
390 static struct kmem_cache *ext4_pspace_cachep;
391 static struct kmem_cache *ext4_ac_cachep;
392 static struct kmem_cache *ext4_free_data_cachep;
393
394 /* We create slab caches for groupinfo data structures based on the
395  * superblock block size.  There will be one cache per unique
396  * s_blocksize_bits, shared by all filesystems with that block size */
397 #define NR_GRPINFO_CACHES 8
398 static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];
399
400 static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
401         "ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
402         "ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
403         "ext4_groupinfo_64k", "ext4_groupinfo_128k"
404 };
405
406 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
407                                         ext4_group_t group);
408 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
409                                                 ext4_group_t group);
410 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);
411
412 static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
413                                ext4_group_t group, int cr);
414
415 static int ext4_try_to_trim_range(struct super_block *sb,
416                 struct ext4_buddy *e4b, ext4_grpblk_t start,
417                 ext4_grpblk_t max, ext4_grpblk_t minblocks);
418
419 /*
420  * The algorithm using this percpu seq counter is as follows:
421  * 1. We sample the percpu discard_pa_seq counter before trying for block
422  *    allocation in ext4_mb_new_blocks().
423  * 2. We increment this percpu discard_pa_seq counter when we either allocate
424  *    or free these blocks i.e. while marking those blocks as used/free in
425  *    mb_mark_used()/mb_free_blocks().
426  * 3. We also increment this percpu seq counter when we successfully identify
427  *    that the bb_prealloc_list is not empty and hence proceed for discarding
428  *    of those PAs inside ext4_mb_discard_group_preallocations().
429  *
430  * Now to make sure that the regular fast path of block allocation is not
431  * affected, as a small optimization we only sample the percpu seq counter
432  * on the local cpu. Only when the block allocation fails and the number of
433  * freed blocks found is 0 do we sample the percpu seq counter for all cpus,
434  * using ext4_get_discard_pa_seq_sum() below. This happens after making
435  * sure that all the PAs on grp->bb_prealloc_list got freed, or that the list is empty.
436  */
437 static DEFINE_PER_CPU(u64, discard_pa_seq);
438 static inline u64 ext4_get_discard_pa_seq_sum(void)
439 {
440         int __cpu;
441         u64 __seq = 0;
442
443         for_each_possible_cpu(__cpu)
444                 __seq += per_cpu(discard_pa_seq, __cpu);
445         return __seq;
446 }
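
/*
 * Illustrative sketch only: a hypothetical helper (not part of
 * mballoc) showing how a caller can pair the sampled sequence with an
 * allocation attempt; the real retry logic lives around
 * ext4_mb_new_blocks().
 */
static inline bool mb_discard_seq_changed_sketch(u64 *seq)
{
	u64 seq_now = ext4_get_discard_pa_seq_sum();

	/* a changed sum means blocks were freed or PAs were discarded
	 * somewhere since *seq was sampled, so a retry may succeed */
	if (seq_now != *seq) {
		*seq = seq_now;
		return true;
	}
	return false;
}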
447
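/*
 * Some architectures (e.g. powerpc) require an unsigned-long-aligned
 * address for the ext4_*_bit() helpers. mb_correct_addr_and_bit()
 * rounds the address down and compensates in the bit index: on a
 * 64-bit machine, for example, (addr = base + 6, bit = 3) becomes
 * (addr = base, bit = 3 + 6 * 8 = 51), the same physical bit.
 */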
448 static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
449 {
450 #if BITS_PER_LONG == 64
451         *bit += ((unsigned long) addr & 7UL) << 3;
452         addr = (void *) ((unsigned long) addr & ~7UL);
453 #elif BITS_PER_LONG == 32
454         *bit += ((unsigned long) addr & 3UL) << 3;
455         addr = (void *) ((unsigned long) addr & ~3UL);
456 #else
457 #error "how many bits are you?!"
458 #endif
459         return addr;
460 }
461
462 static inline int mb_test_bit(int bit, void *addr)
463 {
464         /*
465          * ext4_test_bit on architectures like powerpc
466          * needs an unsigned long aligned address
467          */
468         addr = mb_correct_addr_and_bit(&bit, addr);
469         return ext4_test_bit(bit, addr);
470 }
471
472 static inline void mb_set_bit(int bit, void *addr)
473 {
474         addr = mb_correct_addr_and_bit(&bit, addr);
475         ext4_set_bit(bit, addr);
476 }
477
478 static inline void mb_clear_bit(int bit, void *addr)
479 {
480         addr = mb_correct_addr_and_bit(&bit, addr);
481         ext4_clear_bit(bit, addr);
482 }
483
484 static inline int mb_test_and_clear_bit(int bit, void *addr)
485 {
486         addr = mb_correct_addr_and_bit(&bit, addr);
487         return ext4_test_and_clear_bit(bit, addr);
488 }
489
490 static inline int mb_find_next_zero_bit(void *addr, int max, int start)
491 {
492         int fix = 0, ret, tmpmax;
493         addr = mb_correct_addr_and_bit(&fix, addr);
494         tmpmax = max + fix;
495         start += fix;
496
497         ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
498         if (ret > max)
499                 return max;
500         return ret;
501 }
502
503 static inline int mb_find_next_bit(void *addr, int max, int start)
504 {
505         int fix = 0, ret, tmpmax;
506         addr = mb_correct_addr_and_bit(&fix, addr);
507         tmpmax = max + fix;
508         start += fix;
509
510         ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
511         if (ret > max)
512                 return max;
513         return ret;
514 }
515
516 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
517 {
518         char *bb;
519
520         BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
521         BUG_ON(max == NULL);
522
523         if (order > e4b->bd_blkbits + 1) {
524                 *max = 0;
525                 return NULL;
526         }
527
528         /* at order 0 we see each particular block */
529         if (order == 0) {
530                 *max = 1 << (e4b->bd_blkbits + 3);
531                 return e4b->bd_bitmap;
532         }
533
534         bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
535         *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
536
537         return bb;
538 }
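
/*
 * Example: with 4K blocks (bd_blkbits = 12), order 0 covers the whole
 * cluster bitmap, 1 << (12 + 3) = 32768 bits. Each higher order halves
 * the bit count (s_mb_maxs[1] = 16384, s_mb_maxs[2] = 8192, ...) up to
 * order bd_blkbits + 1.
 */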
539
540 #ifdef DOUBLE_CHECK
541 static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
542                            int first, int count)
543 {
544         int i;
545         struct super_block *sb = e4b->bd_sb;
546
547         if (unlikely(e4b->bd_info->bb_bitmap == NULL))
548                 return;
549         assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
550         for (i = 0; i < count; i++) {
551                 if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
552                         ext4_fsblk_t blocknr;
553
554                         blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
555                         blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
556                         ext4_grp_locked_error(sb, e4b->bd_group,
557                                               inode ? inode->i_ino : 0,
558                                               blocknr,
559                                               "freeing block already freed "
560                                               "(bit %u)",
561                                               first + i);
562                         ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
563                                         EXT4_GROUP_INFO_BBITMAP_CORRUPT);
564                 }
565                 mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
566         }
567 }
568
569 static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
570 {
571         int i;
572
573         if (unlikely(e4b->bd_info->bb_bitmap == NULL))
574                 return;
575         assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
576         for (i = 0; i < count; i++) {
577                 BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
578                 mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
579         }
580 }
581
582 static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
583 {
584         if (unlikely(e4b->bd_info->bb_bitmap == NULL))
585                 return;
586         if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
587                 unsigned char *b1, *b2;
588                 int i;
589                 b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
590                 b2 = (unsigned char *) bitmap;
591                 for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
592                         if (b1[i] != b2[i]) {
593                                 ext4_msg(e4b->bd_sb, KERN_ERR,
594                                          "corruption in group %u "
595                                          "at byte %u(%u): %x in copy != %x "
596                                          "on disk/prealloc",
597                                          e4b->bd_group, i, i * 8, b1[i], b2[i]);
598                                 BUG();
599                         }
600                 }
601         }
602 }
603
604 static void mb_group_bb_bitmap_alloc(struct super_block *sb,
605                         struct ext4_group_info *grp, ext4_group_t group)
606 {
607         struct buffer_head *bh;
608
609         grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
610         if (!grp->bb_bitmap)
611                 return;
612
613         bh = ext4_read_block_bitmap(sb, group);
614         if (IS_ERR_OR_NULL(bh)) {
615                 kfree(grp->bb_bitmap);
616                 grp->bb_bitmap = NULL;
617                 return;
618         }
619
620         memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize);
621         put_bh(bh);
622 }
623
624 static void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
625 {
626         kfree(grp->bb_bitmap);
627 }
628
629 #else
630 static inline void mb_free_blocks_double(struct inode *inode,
631                                 struct ext4_buddy *e4b, int first, int count)
632 {
633         return;
634 }
635 static inline void mb_mark_used_double(struct ext4_buddy *e4b,
636                                                 int first, int count)
637 {
638         return;
639 }
640 static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
641 {
642         return;
643 }
644
645 static inline void mb_group_bb_bitmap_alloc(struct super_block *sb,
646                         struct ext4_group_info *grp, ext4_group_t group)
647 {
648         return;
649 }
650
651 static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
652 {
653         return;
654 }
655 #endif
656
657 #ifdef AGGRESSIVE_CHECK
658
659 #define MB_CHECK_ASSERT(assert)                                         \
660 do {                                                                    \
661         if (!(assert)) {                                                \
662                 printk(KERN_EMERG                                       \
663                         "Assertion failure in %s() at %s:%d: \"%s\"\n", \
664                         function, file, line, # assert);                \
665                 BUG();                                                  \
666         }                                                               \
667 } while (0)
668
669 static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
670                                 const char *function, int line)
671 {
672         struct super_block *sb = e4b->bd_sb;
673         int order = e4b->bd_blkbits + 1;
674         int max;
675         int max2;
676         int i;
677         int j;
678         int k;
679         int count;
680         struct ext4_group_info *grp;
681         int fragments = 0;
682         int fstart;
683         struct list_head *cur;
684         void *buddy;
685         void *buddy2;
686
687         if (e4b->bd_info->bb_check_counter++ % 10)
688                 return 0;
689
690         while (order > 1) {
691                 buddy = mb_find_buddy(e4b, order, &max);
692                 MB_CHECK_ASSERT(buddy);
693                 buddy2 = mb_find_buddy(e4b, order - 1, &max2);
694                 MB_CHECK_ASSERT(buddy2);
695                 MB_CHECK_ASSERT(buddy != buddy2);
696                 MB_CHECK_ASSERT(max * 2 == max2);
697
698                 count = 0;
699                 for (i = 0; i < max; i++) {
700
701                         if (mb_test_bit(i, buddy)) {
702                                 /* only single bit in buddy2 may be 0 */
703                                 if (!mb_test_bit(i << 1, buddy2)) {
704                                         MB_CHECK_ASSERT(
705                                                 mb_test_bit((i<<1)+1, buddy2));
706                                 }
707                                 continue;
708                         }
709
710                         /* both bits in buddy2 must be 1 */
711                         MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
712                         MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
713
714                         for (j = 0; j < (1 << order); j++) {
715                                 k = (i * (1 << order)) + j;
716                                 MB_CHECK_ASSERT(
717                                         !mb_test_bit(k, e4b->bd_bitmap));
718                         }
719                         count++;
720                 }
721                 MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
722                 order--;
723         }
724
725         fstart = -1;
726         buddy = mb_find_buddy(e4b, 0, &max);
727         for (i = 0; i < max; i++) {
728                 if (!mb_test_bit(i, buddy)) {
729                         MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
730                         if (fstart == -1) {
731                                 fragments++;
732                                 fstart = i;
733                         }
734                         continue;
735                 }
736                 fstart = -1;
737                 /* check used bits only */
738                 for (j = 0; j < e4b->bd_blkbits + 1; j++) {
739                         buddy2 = mb_find_buddy(e4b, j, &max2);
740                         k = i >> j;
741                         MB_CHECK_ASSERT(k < max2);
742                         MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
743                 }
744         }
745         MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
746         MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
747
748         grp = ext4_get_group_info(sb, e4b->bd_group);
749         if (!grp)
750                 return 0;
751         list_for_each(cur, &grp->bb_prealloc_list) {
752                 ext4_group_t groupnr;
753                 struct ext4_prealloc_space *pa;
754                 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
755                 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
756                 MB_CHECK_ASSERT(groupnr == e4b->bd_group);
757                 for (i = 0; i < pa->pa_len; i++)
758                         MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
759         }
760         return 0;
761 }
762 #undef MB_CHECK_ASSERT
763 #define mb_check_buddy(e4b) __mb_check_buddy(e4b,       \
764                                         __FILE__, __func__, __LINE__)
765 #else
766 #define mb_check_buddy(e4b)
767 #endif
768
769 /*
770  * Divide the blocks starting at @first with length @len into
771  * smaller chunks of power-of-2 size.
772  * Clear the bits in the buddy bitmap which the blocks of the chunk(s) cover,
773  * then increase bb_counters[] for the corresponding chunk size.
774  */
775 static void ext4_mb_mark_free_simple(struct super_block *sb,
776                                 void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
777                                         struct ext4_group_info *grp)
778 {
779         struct ext4_sb_info *sbi = EXT4_SB(sb);
780         ext4_grpblk_t min;
781         ext4_grpblk_t max;
782         ext4_grpblk_t chunk;
783         unsigned int border;
784
785         BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
786
787         border = 2 << sb->s_blocksize_bits;
788
789         while (len > 0) {
790                 /* find how many blocks can be covered from this position */
791                 max = ffs(first | border) - 1;
792
793                 /* find the largest power-of-2 chunk not exceeding len */
794                 min = fls(len) - 1;
795
796                 if (max < min)
797                         min = max;
798                 chunk = 1 << min;
799
800                 /* mark multiblock chunks only */
801                 grp->bb_counters[min]++;
802                 if (min > 0)
803                         mb_clear_bit(first >> min,
804                                      buddy + sbi->s_mb_offsets[min]);
805
806                 len -= chunk;
807                 first += chunk;
808         }
809 }
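
/*
 * Worked example: for first = 5, len = 11 the loop above emits a
 * 1-cluster chunk at 5 (bb_counters[0]++), a 2-cluster chunk at 6
 * (bb_counters[1]++, clearing bit 3 of the order-1 buddy) and an
 * 8-cluster chunk at 8 (bb_counters[3]++, clearing bit 1 of the
 * order-3 buddy), stopping at first = 16.
 */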
810
811 static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len)
812 {
813         int order;
814
815         /*
816          * We don't bother with special lists for groups whose free extents
817          * are all single blocks, nor for completely empty groups.
818          */
819         order = fls(len) - 2;
820         if (order < 0)
821                 return 0;
822         if (order == MB_NUM_ORDERS(sb))
823                 order--;
824         return order;
825 }
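
/*
 * Example: average fragment sizes 1-3 map to list 0, 4-7 to list 1,
 * 8-15 to list 2, and so on -- fls(len) - 2, clamped to the valid
 * range of list indices.
 */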
826
827 /* Move group to appropriate avg_fragment_size list */
828 static void
829 mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
830 {
831         struct ext4_sb_info *sbi = EXT4_SB(sb);
832         int new_order;
833
834         if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_fragments == 0)
835                 return;
836
837         new_order = mb_avg_fragment_size_order(sb,
838                                         grp->bb_free / grp->bb_fragments);
839         if (new_order == grp->bb_avg_fragment_size_order)
840                 return;
841
842         if (grp->bb_avg_fragment_size_order != -1) {
843                 write_lock(&sbi->s_mb_avg_fragment_size_locks[
844                                         grp->bb_avg_fragment_size_order]);
845                 list_del(&grp->bb_avg_fragment_size_node);
846                 write_unlock(&sbi->s_mb_avg_fragment_size_locks[
847                                         grp->bb_avg_fragment_size_order]);
848         }
849         grp->bb_avg_fragment_size_order = new_order;
850         write_lock(&sbi->s_mb_avg_fragment_size_locks[
851                                         grp->bb_avg_fragment_size_order]);
852         list_add_tail(&grp->bb_avg_fragment_size_node,
853                 &sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]);
854         write_unlock(&sbi->s_mb_avg_fragment_size_locks[
855                                         grp->bb_avg_fragment_size_order]);
856 }
857
858 /*
859  * Choose next group by traversing largest_free_order lists. Updates *new_cr if
860  * cr level needs an update.
861  */
862 static void ext4_mb_choose_next_group_cr0(struct ext4_allocation_context *ac,
863                         int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
864 {
865         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
866         struct ext4_group_info *iter, *grp;
867         int i;
868
869         if (ac->ac_status == AC_STATUS_FOUND)
870                 return;
871
872         if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR0_OPTIMIZED))
873                 atomic_inc(&sbi->s_bal_cr0_bad_suggestions);
874
875         grp = NULL;
876         for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
877                 if (list_empty(&sbi->s_mb_largest_free_orders[i]))
878                         continue;
879                 read_lock(&sbi->s_mb_largest_free_orders_locks[i]);
880                 if (list_empty(&sbi->s_mb_largest_free_orders[i])) {
881                         read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
882                         continue;
883                 }
884                 grp = NULL;
885                 list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i],
886                                     bb_largest_free_order_node) {
887                         if (sbi->s_mb_stats)
888                                 atomic64_inc(&sbi->s_bal_cX_groups_considered[0]);
889                         if (likely(ext4_mb_good_group(ac, iter->bb_group, 0))) {
890                                 grp = iter;
891                                 break;
892                         }
893                 }
894                 read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
895                 if (grp)
896                         break;
897         }
898
899         if (!grp) {
900                 /* Increment cr and search again */
901                 *new_cr = 1;
902         } else {
903                 *group = grp->bb_group;
904                 ac->ac_flags |= EXT4_MB_CR0_OPTIMIZED;
905         }
906 }
907
908 /*
909  * Choose next group by traversing average fragment size list of suitable
910  * order. Updates *new_cr if cr level needs an update.
911  */
912 static void ext4_mb_choose_next_group_cr1(struct ext4_allocation_context *ac,
913                 int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
914 {
915         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
916         struct ext4_group_info *grp = NULL, *iter;
917         int i;
918
919         if (unlikely(ac->ac_flags & EXT4_MB_CR1_OPTIMIZED)) {
920                 if (sbi->s_mb_stats)
921                         atomic_inc(&sbi->s_bal_cr1_bad_suggestions);
922         }
923
924         for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
925              i < MB_NUM_ORDERS(ac->ac_sb); i++) {
926                 if (list_empty(&sbi->s_mb_avg_fragment_size[i]))
927                         continue;
928                 read_lock(&sbi->s_mb_avg_fragment_size_locks[i]);
929                 if (list_empty(&sbi->s_mb_avg_fragment_size[i])) {
930                         read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
931                         continue;
932                 }
933                 list_for_each_entry(iter, &sbi->s_mb_avg_fragment_size[i],
934                                     bb_avg_fragment_size_node) {
935                         if (sbi->s_mb_stats)
936                                 atomic64_inc(&sbi->s_bal_cX_groups_considered[1]);
937                         if (likely(ext4_mb_good_group(ac, iter->bb_group, 1))) {
938                                 grp = iter;
939                                 break;
940                         }
941                 }
942                 read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
943                 if (grp)
944                         break;
945         }
946
947         if (grp) {
948                 *group = grp->bb_group;
949                 ac->ac_flags |= EXT4_MB_CR1_OPTIMIZED;
950         } else {
951                 *new_cr = 2;
952         }
953 }
954
955 static inline int should_optimize_scan(struct ext4_allocation_context *ac)
956 {
957         if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN)))
958                 return 0;
959         if (ac->ac_criteria >= 2)
960                 return 0;
961         if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))
962                 return 0;
963         return 1;
964 }
965
966 /*
967  * Return next linear group for allocation. If linear traversal should not be
968  * performed, this function just returns the same group
969  */
970 static ext4_group_t
971 next_linear_group(struct ext4_allocation_context *ac, ext4_group_t group,
972                   ext4_group_t ngroups)
973 {
974         if (!should_optimize_scan(ac))
975                 goto inc_and_return;
976
977         if (ac->ac_groups_linear_remaining) {
978                 ac->ac_groups_linear_remaining--;
979                 goto inc_and_return;
980         }
981
982         return group;
983 inc_and_return:
984         /*
985          * Artificially restricted ngroups for non-extent
986          * files makes group > ngroups possible on first loop.
987          */
988         return group + 1 >= ngroups ? 0 : group + 1;
989 }
990
991 /*
992  * ext4_mb_choose_next_group: choose next group for allocation.
993  *
994  * @ac        Allocation Context
995  * @new_cr    This is an output parameter. If there is no good group
996  *            available at the current CR level, this field is updated to indicate
997  *            the new cr level that should be used.
998  * @group     This is an input / output parameter. As an input it indicates the
999  *            next group that the allocator intends to use for allocation. As
1000  *            output, this field indicates the next group that should be used as
1001  *            determined by the optimization functions.
1002  * @ngroups   Total number of groups
1003  */
1004 static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
1005                 int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
1006 {
1007         *new_cr = ac->ac_criteria;
1008
1009         if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) {
1010                 *group = next_linear_group(ac, *group, ngroups);
1011                 return;
1012         }
1013
1014         if (*new_cr == 0) {
1015                 ext4_mb_choose_next_group_cr0(ac, new_cr, group, ngroups);
1016         } else if (*new_cr == 1) {
1017                 ext4_mb_choose_next_group_cr1(ac, new_cr, group, ngroups);
1018         } else {
1019                 /*
1020                  * TODO: For CR=2, we can arrange groups in an rb tree sorted by
1021                  * bb_free. But until that happens, we should never come here.
1022                  */
1023                 WARN_ON(1);
1024         }
1025 }
1026
1027 /*
1028  * Cache the order of the largest free extent we have available in this block
1029  * group.
1030  */
1031 static void
1032 mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
1033 {
1034         struct ext4_sb_info *sbi = EXT4_SB(sb);
1035         int i;
1036
1037         for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--)
1038                 if (grp->bb_counters[i] > 0)
1039                         break;
1040         /* No need to move between order lists? */
1041         if (!test_opt2(sb, MB_OPTIMIZE_SCAN) ||
1042             i == grp->bb_largest_free_order) {
1043                 grp->bb_largest_free_order = i;
1044                 return;
1045         }
1046
1047         if (grp->bb_largest_free_order >= 0) {
1048                 write_lock(&sbi->s_mb_largest_free_orders_locks[
1049                                               grp->bb_largest_free_order]);
1050                 list_del_init(&grp->bb_largest_free_order_node);
1051                 write_unlock(&sbi->s_mb_largest_free_orders_locks[
1052                                               grp->bb_largest_free_order]);
1053         }
1054         grp->bb_largest_free_order = i;
1055         if (grp->bb_largest_free_order >= 0 && grp->bb_free) {
1056                 write_lock(&sbi->s_mb_largest_free_orders_locks[
1057                                               grp->bb_largest_free_order]);
1058                 list_add_tail(&grp->bb_largest_free_order_node,
1059                       &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]);
1060                 write_unlock(&sbi->s_mb_largest_free_orders_locks[
1061                                               grp->bb_largest_free_order]);
1062         }
1063 }
1064
1065 static noinline_for_stack
1066 void ext4_mb_generate_buddy(struct super_block *sb,
1067                             void *buddy, void *bitmap, ext4_group_t group,
1068                             struct ext4_group_info *grp)
1069 {
1070         struct ext4_sb_info *sbi = EXT4_SB(sb);
1071         ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
1072         ext4_grpblk_t i = 0;
1073         ext4_grpblk_t first;
1074         ext4_grpblk_t len;
1075         unsigned free = 0;
1076         unsigned fragments = 0;
1077         unsigned long long period = get_cycles();
1078
1079         /* initialize buddy from bitmap which is an aggregation
1080          * of the on-disk bitmap and preallocations */
1081         i = mb_find_next_zero_bit(bitmap, max, 0);
1082         grp->bb_first_free = i;
1083         while (i < max) {
1084                 fragments++;
1085                 first = i;
1086                 i = mb_find_next_bit(bitmap, max, i);
1087                 len = i - first;
1088                 free += len;
1089                 if (len > 1)
1090                         ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
1091                 else
1092                         grp->bb_counters[0]++;
1093                 if (i < max)
1094                         i = mb_find_next_zero_bit(bitmap, max, i);
1095         }
1096         grp->bb_fragments = fragments;
1097
1098         if (free != grp->bb_free) {
1099                 ext4_grp_locked_error(sb, group, 0, 0,
1100                                       "block bitmap and bg descriptor "
1101                                       "inconsistent: %u vs %u free clusters",
1102                                       free, grp->bb_free);
1103                 /*
1104                  * If we intend to continue, we consider the group descriptor
1105                  * corrupt and update bb_free using the bitmap value
1106                  */
1107                 grp->bb_free = free;
1108                 ext4_mark_group_bitmap_corrupted(sb, group,
1109                                         EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1110         }
1111         mb_set_largest_free_order(sb, grp);
1112         mb_update_avg_fragment_size(sb, grp);
1113
1114         clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
1115
1116         period = get_cycles() - period;
1117         atomic_inc(&sbi->s_mb_buddies_generated);
1118         atomic64_add(period, &sbi->s_mb_generation_time);
1119 }
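
/*
 * Example: a 16-cluster group whose combined bitmap has free runs at
 * clusters [2,5) and [8,16) ends up with bb_free = 11 and
 * bb_fragments = 2; ext4_mb_mark_free_simple() splits the runs into
 * power-of-2 chunks (2+1 and 8), giving bb_counters[0] = 1,
 * bb_counters[1] = 1 and bb_counters[3] = 1.
 */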
1120
1121 static void mb_regenerate_buddy(struct ext4_buddy *e4b)
1122 {
1123         int count;
1124         int order = 1;
1125         void *buddy;
1126
1127         while ((buddy = mb_find_buddy(e4b, order++, &count)))
1128                 mb_set_bits(buddy, 0, count);
1129
1130         e4b->bd_info->bb_fragments = 0;
1131         memset(e4b->bd_info->bb_counters, 0,
1132                 sizeof(*e4b->bd_info->bb_counters) *
1133                 (e4b->bd_sb->s_blocksize_bits + 2));
1134
1135         ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy,
1136                 e4b->bd_bitmap, e4b->bd_group, e4b->bd_info);
1137 }
1138
1139 /* The buddy information is attached to the buddy cache inode
1140  * for convenience. The information regarding each group
1141  * is loaded via ext4_mb_load_buddy and consists of the
1142  * block bitmap and the buddy information, which are
1143  * stored in the inode as
1144  *
1145  * {                        page                        }
1146  * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
1147  *
1148  *
1149  * one block each for bitmap and buddy information.
1150  * So for each group we take up 2 blocks. A page can
1151  * contain blocks_per_page (PAGE_SIZE / blocksize)  blocks.
1152  * So it can hold information for groups_per_page groups, which
1153  * is blocks_per_page/2
1154  *
1155  * Locking note:  This routine takes the block group lock of all groups
1156  * for this page; do not hold this lock when calling this routine!
1157  */
1158
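/*
 * Example with 4K pages: for a 1K block size, blocks_per_page = 4 and
 * groups_per_page = 2, so page 0 holds [g0 bitmap][g0 buddy][g1
 * bitmap][g1 buddy]. For a 4K block size, blocks_per_page = 1 and each
 * bitmap or buddy block gets a page of its own.
 */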
1159 static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
1160 {
1161         ext4_group_t ngroups;
1162         int blocksize;
1163         int blocks_per_page;
1164         int groups_per_page;
1165         int err = 0;
1166         int i;
1167         ext4_group_t first_group, group;
1168         int first_block;
1169         struct super_block *sb;
1170         struct buffer_head *bhs;
1171         struct buffer_head **bh = NULL;
1172         struct inode *inode;
1173         char *data;
1174         char *bitmap;
1175         struct ext4_group_info *grinfo;
1176
1177         inode = page->mapping->host;
1178         sb = inode->i_sb;
1179         ngroups = ext4_get_groups_count(sb);
1180         blocksize = i_blocksize(inode);
1181         blocks_per_page = PAGE_SIZE / blocksize;
1182
1183         mb_debug(sb, "init page %lu\n", page->index);
1184
1185         groups_per_page = blocks_per_page >> 1;
1186         if (groups_per_page == 0)
1187                 groups_per_page = 1;
1188
1189         /* allocate buffer_heads to read bitmaps */
1190         if (groups_per_page > 1) {
1191                 i = sizeof(struct buffer_head *) * groups_per_page;
1192                 bh = kzalloc(i, gfp);
1193                 if (bh == NULL) {
1194                         err = -ENOMEM;
1195                         goto out;
1196                 }
1197         } else
1198                 bh = &bhs;
1199
1200         first_group = page->index * blocks_per_page / 2;
1201
1202         /* read all groups the page covers into the cache */
1203         for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
1204                 if (group >= ngroups)
1205                         break;
1206
1207                 grinfo = ext4_get_group_info(sb, group);
1208                 if (!grinfo)
1209                         continue;
1210                 /*
1211                  * If page is uptodate then we came here after online resize
1212                  * which added some new uninitialized group info structs, so
1213                  * we must skip all initialized uptodate buddies on the page,
1214                  * which may be currently in use by an allocating task.
1215                  */
1216                 if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
1217                         bh[i] = NULL;
1218                         continue;
1219                 }
1220                 bh[i] = ext4_read_block_bitmap_nowait(sb, group, false);
1221                 if (IS_ERR(bh[i])) {
1222                         err = PTR_ERR(bh[i]);
1223                         bh[i] = NULL;
1224                         goto out;
1225                 }
1226                 mb_debug(sb, "read bitmap for group %u\n", group);
1227         }
1228
1229         /* wait for I/O completion */
1230         for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
1231                 int err2;
1232
1233                 if (!bh[i])
1234                         continue;
1235                 err2 = ext4_wait_block_bitmap(sb, group, bh[i]);
1236                 if (!err)
1237                         err = err2;
1238         }
1239
1240         first_block = page->index * blocks_per_page;
1241         for (i = 0; i < blocks_per_page; i++) {
1242                 group = (first_block + i) >> 1;
1243                 if (group >= ngroups)
1244                         break;
1245
1246                 if (!bh[group - first_group])
1247                         /* skip initialized uptodate buddy */
1248                         continue;
1249
1250                 if (!buffer_verified(bh[group - first_group]))
1251                         /* Skip faulty bitmaps */
1252                         continue;
1253                 err = 0;
1254
1255                 /*
1256                  * data carries information regarding this
1257                  * particular group in the format specified
1258                  * above
1259                  *
1260                  */
1261                 data = page_address(page) + (i * blocksize);
1262                 bitmap = bh[group - first_group]->b_data;
1263
1264                 /*
1265                  * We place the buddy block and bitmap block
1266                  * close together
1267                  */
1268                 if ((first_block + i) & 1) {
1269                         /* this is block of buddy */
1270                         BUG_ON(incore == NULL);
1271                         mb_debug(sb, "put buddy for group %u in page %lu/%x\n",
1272                                 group, page->index, i * blocksize);
1273                         trace_ext4_mb_buddy_bitmap_load(sb, group);
1274                         grinfo = ext4_get_group_info(sb, group);
1275                         if (!grinfo) {
1276                                 err = -EFSCORRUPTED;
1277                                 goto out;
1278                         }
1279                         grinfo->bb_fragments = 0;
1280                         memset(grinfo->bb_counters, 0,
1281                                sizeof(*grinfo->bb_counters) *
1282                                (MB_NUM_ORDERS(sb)));
1283                         /*
1284                          * incore got set to the group block bitmap below
1285                          */
1286                         ext4_lock_group(sb, group);
1287                         /* init the buddy */
1288                         memset(data, 0xff, blocksize);
1289                         ext4_mb_generate_buddy(sb, data, incore, group, grinfo);
1290                         ext4_unlock_group(sb, group);
1291                         incore = NULL;
1292                 } else {
1293                         /* this is block of bitmap */
1294                         BUG_ON(incore != NULL);
1295                         mb_debug(sb, "put bitmap for group %u in page %lu/%x\n",
1296                                 group, page->index, i * blocksize);
1297                         trace_ext4_mb_bitmap_load(sb, group);
1298
1299                         /* see comments in ext4_mb_put_pa() */
1300                         ext4_lock_group(sb, group);
1301                         memcpy(data, bitmap, blocksize);
1302
1303                         /* mark all preallocated blks used in in-core bitmap */
1304                         ext4_mb_generate_from_pa(sb, data, group);
1305                         ext4_mb_generate_from_freelist(sb, data, group);
1306                         ext4_unlock_group(sb, group);
1307
1308                         /* set incore so that the buddy information can be
1309                          * generated using this
1310                          */
1311                         incore = data;
1312                 }
1313         }
1314         SetPageUptodate(page);
1315
1316 out:
1317         if (bh) {
1318                 for (i = 0; i < groups_per_page; i++)
1319                         brelse(bh[i]);
1320                 if (bh != &bhs)
1321                         kfree(bh);
1322         }
1323         return err;
1324 }
1325
1326 /*
1327  * Lock the buddy and bitmap pages. This makes sure other parallel init_group
1328  * on the same buddy page doesn't happen while holding the buddy page lock.
1329  * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap
1330  * are on the same page e4b->bd_buddy_page is NULL and return value is 0.
1331  */
1332 static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
1333                 ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
1334 {
1335         struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
1336         int block, pnum, poff;
1337         int blocks_per_page;
1338         struct page *page;
1339
1340         e4b->bd_buddy_page = NULL;
1341         e4b->bd_bitmap_page = NULL;
1342
1343         blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1344         /*
1345          * the buddy cache inode stores the block bitmap
1346          * and buddy information in consecutive blocks.
1347          * So for each group we need two blocks.
1348          */
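        /*
         * For example, assuming 4K pages and a 1K block size
         * (blocks_per_page == 4), group 5 uses blocks 10 and 11,
         * which both land in page 2 (pnum = 10 / 4) at offsets
         * poff = 2 and 3, so a single page lock covers both.
         */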
1349         block = group * 2;
1350         pnum = block / blocks_per_page;
1351         poff = block % blocks_per_page;
1352         page = find_or_create_page(inode->i_mapping, pnum, gfp);
1353         if (!page)
1354                 return -ENOMEM;
1355         BUG_ON(page->mapping != inode->i_mapping);
1356         e4b->bd_bitmap_page = page;
1357         e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1358
1359         if (blocks_per_page >= 2) {
1360                 /* buddy and bitmap are on the same page */
1361                 return 0;
1362         }
1363
1364         block++;
1365         pnum = block / blocks_per_page;
1366         page = find_or_create_page(inode->i_mapping, pnum, gfp);
1367         if (!page)
1368                 return -ENOMEM;
1369         BUG_ON(page->mapping != inode->i_mapping);
1370         e4b->bd_buddy_page = page;
1371         return 0;
1372 }
1373
1374 static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
1375 {
1376         if (e4b->bd_bitmap_page) {
1377                 unlock_page(e4b->bd_bitmap_page);
1378                 put_page(e4b->bd_bitmap_page);
1379         }
1380         if (e4b->bd_buddy_page) {
1381                 unlock_page(e4b->bd_buddy_page);
1382                 put_page(e4b->bd_buddy_page);
1383         }
1384 }
1385
1386 /*
1387  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1388  * block group lock of all groups for this page; do not hold the BG lock when
1389  * calling this routine!
1390  */
1391 static noinline_for_stack
1392 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
1393 {
1395         struct ext4_group_info *this_grp;
1396         struct ext4_buddy e4b;
1397         struct page *page;
1398         int ret = 0;
1399
1400         might_sleep();
1401         mb_debug(sb, "init group %u\n", group);
1402         this_grp = ext4_get_group_info(sb, group);
1403         if (!this_grp)
1404                 return -EFSCORRUPTED;
1405
1406         /*
1407          * This ensures that we don't reinit the buddy cache
         * pages which map to the group from which we are already
         * allocating. If we are looking at the buddy cache we would
         * have taken a reference using ext4_mb_load_buddy and that
         * would have pinned the buddy page to the page cache.
1412          * The call to ext4_mb_get_buddy_page_lock will mark the
1413          * page accessed.
1414          */
1415         ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
1416         if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
1417                 /*
                 * somebody else initialized the group, or we failed
                 * to lock the pages; return without doing anything
1420                  */
1421                 goto err;
1422         }
1423
1424         page = e4b.bd_bitmap_page;
1425         ret = ext4_mb_init_cache(page, NULL, gfp);
1426         if (ret)
1427                 goto err;
1428         if (!PageUptodate(page)) {
1429                 ret = -EIO;
1430                 goto err;
1431         }
1432
1433         if (e4b.bd_buddy_page == NULL) {
1434                 /*
                 * If both the bitmap and buddy are on the same
                 * page we don't need to force-init the buddy
1438                  */
1439                 ret = 0;
1440                 goto err;
1441         }
1442         /* init buddy cache */
1443         page = e4b.bd_buddy_page;
1444         ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
1445         if (ret)
1446                 goto err;
1447         if (!PageUptodate(page)) {
1448                 ret = -EIO;
1449                 goto err;
1450         }
1451 err:
1452         ext4_mb_put_buddy_page_lock(&e4b);
1453         return ret;
1454 }
1455
1456 /*
1457  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1458  * block group lock of all groups for this page; do not hold the BG lock when
1459  * calling this routine!
1460  */
1461 static noinline_for_stack int
1462 ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
1463                        struct ext4_buddy *e4b, gfp_t gfp)
1464 {
1465         int blocks_per_page;
1466         int block;
1467         int pnum;
1468         int poff;
1469         struct page *page;
1470         int ret;
1471         struct ext4_group_info *grp;
1472         struct ext4_sb_info *sbi = EXT4_SB(sb);
1473         struct inode *inode = sbi->s_buddy_cache;
1474
1475         might_sleep();
1476         mb_debug(sb, "load group %u\n", group);
1477
1478         blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1479         grp = ext4_get_group_info(sb, group);
1480         if (!grp)
1481                 return -EFSCORRUPTED;
1482
1483         e4b->bd_blkbits = sb->s_blocksize_bits;
1484         e4b->bd_info = grp;
1485         e4b->bd_sb = sb;
1486         e4b->bd_group = group;
1487         e4b->bd_buddy_page = NULL;
1488         e4b->bd_bitmap_page = NULL;
1489
1490         if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1491                 /*
1492                  * we need full data about the group
1493                  * to make a good selection
1494                  */
1495                 ret = ext4_mb_init_group(sb, group, gfp);
1496                 if (ret)
1497                         return ret;
1498         }
1499
1500         /*
1501          * the buddy cache inode stores the block bitmap
1502          * and buddy information in consecutive blocks.
1503          * So for each group we need two blocks.
1504          */
1505         block = group * 2;
1506         pnum = block / blocks_per_page;
1507         poff = block % blocks_per_page;
1508
        /* we could use find_or_create_page(), but it locks the page,
         * which we'd like to avoid in the fast path ... */
1511         page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1512         if (page == NULL || !PageUptodate(page)) {
1513                 if (page)
1514                         /*
                         * drop the page reference and try to get
                         * the page with the lock. If the page is not
                         * uptodate, somebody just created it but has
                         * yet to initialize it; taking the page lock
                         * waits for that initialization to finish.
1521                          */
1522                         put_page(page);
1523                 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1524                 if (page) {
1525                         BUG_ON(page->mapping != inode->i_mapping);
1526                         if (!PageUptodate(page)) {
1527                                 ret = ext4_mb_init_cache(page, NULL, gfp);
1528                                 if (ret) {
1529                                         unlock_page(page);
1530                                         goto err;
1531                                 }
1532                                 mb_cmp_bitmaps(e4b, page_address(page) +
1533                                                (poff * sb->s_blocksize));
1534                         }
1535                         unlock_page(page);
1536                 }
1537         }
1538         if (page == NULL) {
1539                 ret = -ENOMEM;
1540                 goto err;
1541         }
1542         if (!PageUptodate(page)) {
1543                 ret = -EIO;
1544                 goto err;
1545         }
1546
1547         /* Pages marked accessed already */
1548         e4b->bd_bitmap_page = page;
1549         e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1550
1551         block++;
1552         pnum = block / blocks_per_page;
1553         poff = block % blocks_per_page;
1554
1555         page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1556         if (page == NULL || !PageUptodate(page)) {
1557                 if (page)
1558                         put_page(page);
1559                 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1560                 if (page) {
1561                         BUG_ON(page->mapping != inode->i_mapping);
1562                         if (!PageUptodate(page)) {
1563                                 ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
1564                                                          gfp);
1565                                 if (ret) {
1566                                         unlock_page(page);
1567                                         goto err;
1568                                 }
1569                         }
1570                         unlock_page(page);
1571                 }
1572         }
1573         if (page == NULL) {
1574                 ret = -ENOMEM;
1575                 goto err;
1576         }
1577         if (!PageUptodate(page)) {
1578                 ret = -EIO;
1579                 goto err;
1580         }
1581
1582         /* Pages marked accessed already */
1583         e4b->bd_buddy_page = page;
1584         e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
1585
1586         return 0;
1587
1588 err:
1589         if (page)
1590                 put_page(page);
1591         if (e4b->bd_bitmap_page)
1592                 put_page(e4b->bd_bitmap_page);
1593         if (e4b->bd_buddy_page)
1594                 put_page(e4b->bd_buddy_page);
1595         e4b->bd_buddy = NULL;
1596         e4b->bd_bitmap = NULL;
1597         return ret;
1598 }
1599
1600 static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1601                               struct ext4_buddy *e4b)
1602 {
1603         return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS);
1604 }
1605
1606 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
1607 {
1608         if (e4b->bd_bitmap_page)
1609                 put_page(e4b->bd_bitmap_page);
1610         if (e4b->bd_buddy_page)
1611                 put_page(e4b->bd_buddy_page);
1612 }
1613
1615 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1616 {
1617         int order = 1, max;
1618         void *bb;
1619
1620         BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
1621         BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1622
1623         while (order <= e4b->bd_blkbits + 1) {
1624                 bb = mb_find_buddy(e4b, order, &max);
1625                 if (!mb_test_bit(block >> order, bb)) {
1626                         /* this block is part of buddy of order 'order' */
1627                         return order;
1628                 }
1629                 order++;
1630         }
1631         return 0;
1632 }
1633
1634 static void mb_clear_bits(void *bm, int cur, int len)
1635 {
1636         __u32 *addr;
1637
1638         len = cur + len;
1639         while (cur < len) {
1640                 if ((cur & 31) == 0 && (len - cur) >= 32) {
1641                         /* fast path: clear whole word at once */
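                        /*
                         * "cur" is 32-bit aligned here, so cur >> 3 is the
                         * byte offset of an aligned __u32. Storing an
                         * all-zero word (or all-one in mb_set_bits) is safe
                         * regardless of the bit ordering within the word.
                         */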
1642                         addr = bm + (cur >> 3);
1643                         *addr = 0;
1644                         cur += 32;
1645                         continue;
1646                 }
1647                 mb_clear_bit(cur, bm);
1648                 cur++;
1649         }
1650 }
1651
/* Clear the bits in the given range.
 * Return the first bit that was already zero, if any, or -1 otherwise.
 */
1655 static int mb_test_and_clear_bits(void *bm, int cur, int len)
1656 {
1657         __u32 *addr;
1658         int zero_bit = -1;
1659
1660         len = cur + len;
1661         while (cur < len) {
1662                 if ((cur & 31) == 0 && (len - cur) >= 32) {
1663                         /* fast path: clear whole word at once */
1664                         addr = bm + (cur >> 3);
1665                         if (*addr != (__u32)(-1) && zero_bit == -1)
1666                                 zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0);
1667                         *addr = 0;
1668                         cur += 32;
1669                         continue;
1670                 }
1671                 if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1)
1672                         zero_bit = cur;
1673                 cur++;
1674         }
1675
1676         return zero_bit;
1677 }
1678
1679 void mb_set_bits(void *bm, int cur, int len)
1680 {
1681         __u32 *addr;
1682
1683         len = cur + len;
1684         while (cur < len) {
1685                 if ((cur & 31) == 0 && (len - cur) >= 32) {
1686                         /* fast path: set whole word at once */
1687                         addr = bm + (cur >> 3);
1688                         *addr = 0xffffffff;
1689                         cur += 32;
1690                         continue;
1691                 }
1692                 mb_set_bit(cur, bm);
1693                 cur++;
1694         }
1695 }
1696
static inline int mb_buddy_adjust_border(int *bit, void *bitmap, int side)
{
        if (mb_test_bit(*bit + side, bitmap)) {
                mb_clear_bit(*bit, bitmap);
                (*bit) -= side;
                return 1;
        } else {
                (*bit) += side;
                mb_set_bit(*bit, bitmap);
                return -1;
        }
}
1710
1711 static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
1712 {
1713         int max;
1714         int order = 1;
1715         void *buddy = mb_find_buddy(e4b, order, &max);
1716
1717         while (buddy) {
1718                 void *buddy2;
1719
1720                 /* Bits in range [first; last] are known to be set since
1721                  * corresponding blocks were allocated. Bits in range
                 * (first; last) will stay set because they form buddies on
                 * the upper layer. We only deal with the borders when they
                 * don't align with the upper layer, and then go up.
                 * Releasing an entire group amounts to clearing a single
                 * bit of the highest-order buddy.
1727                  */
1728
1729                 /* Example:
1730                  * ---------------------------------
1731                  * |   1   |   1   |   1   |   1   |
1732                  * ---------------------------------
1733                  * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
1734                  * ---------------------------------
1735                  *   0   1   2   3   4   5   6   7
1736                  *      \_____________________/
1737                  *
1738                  * Neither [1] nor [6] is aligned to above layer.
1739                  * Left neighbour [0] is free, so mark it busy,
1740                  * decrease bb_counters and extend range to
1741                  * [0; 6]
                 * Right neighbour [7] is busy. It can't be coalesced with [6], so
1743                  * mark [6] free, increase bb_counters and shrink range to
1744                  * [0; 5].
1745                  * Then shift range to [0; 2], go up and do the same.
1746                  */
1747
1749                 if (first & 1)
1750                         e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
1751                 if (!(last & 1))
1752                         e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
1753                 if (first > last)
1754                         break;
1755                 order++;
1756
1757                 if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) {
1758                         mb_clear_bits(buddy, first, last - first + 1);
1759                         e4b->bd_info->bb_counters[order - 1] += last - first + 1;
1760                         break;
1761                 }
1762                 first >>= 1;
1763                 last >>= 1;
1764                 buddy = buddy2;
1765         }
1766 }
1767
1768 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1769                            int first, int count)
1770 {
1771         int left_is_free = 0;
1772         int right_is_free = 0;
1773         int block;
1774         int last = first + count - 1;
1775         struct super_block *sb = e4b->bd_sb;
1776
1777         if (WARN_ON(count == 0))
1778                 return;
1779         BUG_ON(last >= (sb->s_blocksize << 3));
1780         assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
1781         /* Don't bother if the block group is corrupt. */
1782         if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
1783                 return;
1784
1785         mb_check_buddy(e4b);
1786         mb_free_blocks_double(inode, e4b, first, count);
1787
1788         /* access memory sequentially: check left neighbour,
1789          * clear range and then check right neighbour
1790          */
1791         if (first != 0)
1792                 left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap);
1793         block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
1794         if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0])
1795                 right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);
1796
1797         if (unlikely(block != -1)) {
1798                 struct ext4_sb_info *sbi = EXT4_SB(sb);
1799                 ext4_fsblk_t blocknr;
1800
1801                 /*
1802                  * Fastcommit replay can free already freed blocks which
1803                  * corrupts allocation info. Regenerate it.
1804                  */
1805                 if (sbi->s_mount_state & EXT4_FC_REPLAY) {
1806                         mb_regenerate_buddy(e4b);
1807                         goto check;
1808                 }
1809
1810                 blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
1811                 blocknr += EXT4_C2B(sbi, block);
1812                 ext4_grp_locked_error(sb, e4b->bd_group,
1813                                       inode ? inode->i_ino : 0, blocknr,
1814                                       "freeing already freed block (bit %u); block bitmap corrupt.",
1815                                       block);
1816                 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
1817                                 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1818                 return;
1819         }
1820
1821         this_cpu_inc(discard_pa_seq);
1822         e4b->bd_info->bb_free += count;
1823         if (first < e4b->bd_info->bb_first_free)
1824                 e4b->bd_info->bb_first_free = first;
1825
1826         /* let's maintain fragments counter */
1827         if (left_is_free && right_is_free)
1828                 e4b->bd_info->bb_fragments--;
1829         else if (!left_is_free && !right_is_free)
1830                 e4b->bd_info->bb_fragments++;
1831
1832         /* buddy[0] == bd_bitmap is a special case, so handle
1833          * it right away and let mb_buddy_mark_free stay free of
1834          * zero order checks.
         * Check if neighbours are to be coalesced,
1836          * adjust bitmap bb_counters and borders appropriately.
1837          */
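        /*
         * E.g. if "first" is odd, its order-0 buddy is block first - 1:
         * if that neighbour is free, the pair merges into an order-1
         * chunk (bb_counters[0]--); otherwise the freed block stays a
         * lone order-0 chunk (bb_counters[0]++) and is excluded from
         * the higher-order pass by bumping "first".
         */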
1838         if (first & 1) {
1839                 first += !left_is_free;
1840                 e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
1841         }
1842         if (!(last & 1)) {
1843                 last -= !right_is_free;
1844                 e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1;
1845         }
1846
1847         if (first <= last)
1848                 mb_buddy_mark_free(e4b, first >> 1, last >> 1);
1849
1850         mb_set_largest_free_order(sb, e4b->bd_info);
1851         mb_update_avg_fragment_size(sb, e4b->bd_info);
1852 check:
1853         mb_check_buddy(e4b);
1854 }
1855
1856 static int mb_find_extent(struct ext4_buddy *e4b, int block,
1857                                 int needed, struct ext4_free_extent *ex)
1858 {
1859         int next = block;
1860         int max, order;
1861         void *buddy;
1862
1863         assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1864         BUG_ON(ex == NULL);
1865
1866         buddy = mb_find_buddy(e4b, 0, &max);
1867         BUG_ON(buddy == NULL);
1868         BUG_ON(block >= max);
1869         if (mb_test_bit(block, buddy)) {
1870                 ex->fe_len = 0;
1871                 ex->fe_start = 0;
1872                 ex->fe_group = 0;
1873                 return 0;
1874         }
1875
1876         /* find actual order */
1877         order = mb_find_order_for_block(e4b, block);
1878         block = block >> order;
1879
1880         ex->fe_len = 1 << order;
1881         ex->fe_start = block << order;
1882         ex->fe_group = e4b->bd_group;
1883
1884         /* calc difference from given start */
1885         next = next - ex->fe_start;
1886         ex->fe_len -= next;
1887         ex->fe_start += next;
1888
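        /*
         * Try to extend the extent to the right while the request is
         * not yet satisfied: each step appends the next adjacent free
         * chunk, whose size is read from the buddy data.
         */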
1889         while (needed > ex->fe_len &&
1890                mb_find_buddy(e4b, order, &max)) {
1891
1892                 if (block + 1 >= max)
1893                         break;
1894
1895                 next = (block + 1) * (1 << order);
1896                 if (mb_test_bit(next, e4b->bd_bitmap))
1897                         break;
1898
1899                 order = mb_find_order_for_block(e4b, next);
1900
1901                 block = next >> order;
1902                 ex->fe_len += 1 << order;
1903         }
1904
1905         if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) {
1906                 /* Should never happen! (but apparently sometimes does?!?) */
1907                 WARN_ON(1);
1908                 ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0,
1909                         "corruption or bug in mb_find_extent "
1910                         "block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
1911                         block, order, needed, ex->fe_group, ex->fe_start,
1912                         ex->fe_len, ex->fe_logical);
1913                 ex->fe_len = 0;
1914                 ex->fe_start = 0;
1915                 ex->fe_group = 0;
1916         }
1917         return ex->fe_len;
1918 }
1919
1920 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
1921 {
1922         int ord;
1923         int mlen = 0;
1924         int max = 0;
1925         int cur;
1926         int start = ex->fe_start;
1927         int len = ex->fe_len;
1928         unsigned ret = 0;
1929         int len0 = len;
1930         void *buddy;
1931         bool split = false;
1932
1933         BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
1934         BUG_ON(e4b->bd_group != ex->fe_group);
1935         assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1936         mb_check_buddy(e4b);
1937         mb_mark_used_double(e4b, start, len);
1938
1939         this_cpu_inc(discard_pa_seq);
1940         e4b->bd_info->bb_free -= len;
1941         if (e4b->bd_info->bb_first_free == start)
1942                 e4b->bd_info->bb_first_free += len;
1943
1944         /* let's maintain fragments counter */
1945         if (start != 0)
1946                 mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
1947         if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
1948                 max = !mb_test_bit(start + len, e4b->bd_bitmap);
1949         if (mlen && max)
1950                 e4b->bd_info->bb_fragments++;
1951         else if (!mlen && !max)
1952                 e4b->bd_info->bb_fragments--;
1953
1954         /* let's maintain buddy itself */
1955         while (len) {
1956                 if (!split)
1957                         ord = mb_find_order_for_block(e4b, start);
1958
1959                 if (((start >> ord) << ord) == start && len >= (1 << ord)) {
1960                         /* the whole chunk may be allocated at once! */
1961                         mlen = 1 << ord;
1962                         if (!split)
1963                                 buddy = mb_find_buddy(e4b, ord, &max);
1964                         else
1965                                 split = false;
1966                         BUG_ON((start >> ord) >= max);
1967                         mb_set_bit(start >> ord, buddy);
1968                         e4b->bd_info->bb_counters[ord]--;
1969                         start += mlen;
1970                         len -= mlen;
1971                         BUG_ON(len < 0);
1972                         continue;
1973                 }
1974
1975                 /* store for history */
1976                 if (ret == 0)
1977                         ret = len | (ord << 16);
1978
1979                 /* we have to split large buddy */
1980                 BUG_ON(ord <= 0);
1981                 buddy = mb_find_buddy(e4b, ord, &max);
1982                 mb_set_bit(start >> ord, buddy);
1983                 e4b->bd_info->bb_counters[ord]--;
1984
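                /*
                 * Expose the two order ord-1 halves as free. The next
                 * iteration reuses this lower-order buddy directly (via
                 * "split") instead of re-deriving the block's order.
                 */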
1985                 ord--;
1986                 cur = (start >> ord) & ~1U;
1987                 buddy = mb_find_buddy(e4b, ord, &max);
1988                 mb_clear_bit(cur, buddy);
1989                 mb_clear_bit(cur + 1, buddy);
1990                 e4b->bd_info->bb_counters[ord]++;
1991                 e4b->bd_info->bb_counters[ord]++;
1992                 split = true;
1993         }
1994         mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
1995
1996         mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info);
1997         mb_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
1998         mb_check_buddy(e4b);
1999
2000         return ret;
2001 }
2002
2003 /*
2004  * Must be called under group lock!
2005  */
2006 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
2007                                         struct ext4_buddy *e4b)
2008 {
2009         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2010         int ret;
2011
2012         BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
2013         BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2014
2015         ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
2016         ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
2017         ret = mb_mark_used(e4b, &ac->ac_b_ex);
2018
        /* preallocation can change ac_b_ex, thus we store the actually
         * allocated blocks for history */
2021         ac->ac_f_ex = ac->ac_b_ex;
2022
2023         ac->ac_status = AC_STATUS_FOUND;
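        /*
         * mb_mark_used() packs history data into its return value: the
         * low 16 bits hold the length remaining when a buddy chunk
         * first had to be split, the high 16 bits hold the order of
         * that split (both zero if no split was needed).
         */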
2024         ac->ac_tail = ret & 0xffff;
2025         ac->ac_buddy = ret >> 16;
2026
2027         /*
         * take the page reference. We want the page to be pinned
         * so that we don't get an ext4_mb_init_cache() call for this
         * group until we update the bitmap; otherwise we could
         * double-allocate blocks. The reference is dropped in
         * ext4_mb_release_context().
2033          */
2034         ac->ac_bitmap_page = e4b->bd_bitmap_page;
2035         get_page(ac->ac_bitmap_page);
2036         ac->ac_buddy_page = e4b->bd_buddy_page;
2037         get_page(ac->ac_buddy_page);
2038         /* store last allocated for subsequent stream allocation */
2039         if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2040                 spin_lock(&sbi->s_md_lock);
2041                 sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
2042                 sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
2043                 spin_unlock(&sbi->s_md_lock);
2044         }
2045         /*
         * As we've just preallocated more space than the user
         * originally requested, we store the allocated space
         * in a special descriptor.
2049          */
2050         if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
2051                 ext4_mb_new_preallocation(ac);
2053 }
2054
2055 static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
2056                                         struct ext4_buddy *e4b,
2057                                         int finish_group)
2058 {
2059         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2060         struct ext4_free_extent *bex = &ac->ac_b_ex;
2061         struct ext4_free_extent *gex = &ac->ac_g_ex;
2062         struct ext4_free_extent ex;
2063         int max;
2064
2065         if (ac->ac_status == AC_STATUS_FOUND)
2066                 return;
2067         /*
2068          * We don't want to scan for a whole year
2069          */
2070         if (ac->ac_found > sbi->s_mb_max_to_scan &&
2071                         !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2072                 ac->ac_status = AC_STATUS_BREAK;
2073                 return;
2074         }
2075
2076         /*
2077          * Haven't found good chunk so far, let's continue
2078          */
2079         if (bex->fe_len < gex->fe_len)
2080                 return;
2081
2082         if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
2083                         && bex->fe_group == e4b->bd_group) {
2084                 /* recheck chunk's availability - we don't know
2085                  * when it was found (within this lock-unlock
2086                  * period or not) */
2087                 max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex);
2088                 if (max >= gex->fe_len) {
2089                         ext4_mb_use_best_found(ac, e4b);
2090                         return;
2091                 }
2092         }
2093 }
2094
2095 /*
 * The routine checks whether the found extent is good enough. If it is,
 * the extent gets marked used and a flag is set in the context to stop
 * scanning. Otherwise, the extent is compared with the previously found
 * extent and, if the new one is better, it is stored in the context.
 * Later, the best found extent will be used if mballoc can't find a
 * good enough extent.
 *
 * FIXME: the real allocation policy is yet to be designed!
2104  */
2105 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
2106                                         struct ext4_free_extent *ex,
2107                                         struct ext4_buddy *e4b)
2108 {
2109         struct ext4_free_extent *bex = &ac->ac_b_ex;
2110         struct ext4_free_extent *gex = &ac->ac_g_ex;
2111
2112         BUG_ON(ex->fe_len <= 0);
2113         BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2114         BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2115         BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
2116
2117         ac->ac_found++;
2118
2119         /*
2120          * The special case - take what you catch first
2121          */
2122         if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2123                 *bex = *ex;
2124                 ext4_mb_use_best_found(ac, e4b);
2125                 return;
2126         }
2127
2128         /*
         * Let's check whether the chunk is good enough
2130          */
2131         if (ex->fe_len == gex->fe_len) {
2132                 *bex = *ex;
2133                 ext4_mb_use_best_found(ac, e4b);
2134                 return;
2135         }
2136
2137         /*
         * If this is the first found extent, just store it in the context
2139          */
2140         if (bex->fe_len == 0) {
2141                 *bex = *ex;
2142                 return;
2143         }
2144
2145         /*
         * If the newly found extent is better, store it in the context
2147          */
2148         if (bex->fe_len < gex->fe_len) {
                /* if the request isn't satisfied, any found extent
                 * larger than the previous best one is better */
2151                 if (ex->fe_len > bex->fe_len)
2152                         *bex = *ex;
2153         } else if (ex->fe_len > gex->fe_len) {
                /* if the request is satisfied, then we try to find
                 * an extent that still satisfies the request, but is
                 * smaller than the previous one */
2157                 if (ex->fe_len < bex->fe_len)
2158                         *bex = *ex;
2159         }
2160
2161         ext4_mb_check_limits(ac, e4b, 0);
2162 }
2163
2164 static noinline_for_stack
2165 int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
2166                                         struct ext4_buddy *e4b)
2167 {
2168         struct ext4_free_extent ex = ac->ac_b_ex;
2169         ext4_group_t group = ex.fe_group;
2170         int max;
2171         int err;
2172
2173         BUG_ON(ex.fe_len <= 0);
2174         err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
2175         if (err)
2176                 return err;
2177
2178         ext4_lock_group(ac->ac_sb, group);
2179         if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
2180                 goto out;
2181
2182         max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
2183
2184         if (max > 0) {
2185                 ac->ac_b_ex = ex;
2186                 ext4_mb_use_best_found(ac, e4b);
2187         }
2188
2189 out:
2190         ext4_unlock_group(ac->ac_sb, group);
2191         ext4_mb_unload_buddy(e4b);
2192
2193         return 0;
2194 }
2195
2196 static noinline_for_stack
2197 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
2198                                 struct ext4_buddy *e4b)
2199 {
2200         ext4_group_t group = ac->ac_g_ex.fe_group;
2201         int max;
2202         int err;
2203         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2204         struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2205         struct ext4_free_extent ex;
2206
2207         if (!grp)
2208                 return -EFSCORRUPTED;
2209         if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY)))
2210                 return 0;
2211         if (grp->bb_free == 0)
2212                 return 0;
2213
2214         err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
2215         if (err)
2216                 return err;
2217
2218         ext4_lock_group(ac->ac_sb, group);
2219         if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
2220                 goto out;
2221
2222         max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
2223                              ac->ac_g_ex.fe_len, &ex);
2224         ex.fe_logical = 0xDEADFA11; /* debug value */
2225
2226         if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
2227                 ext4_fsblk_t start;
2228
2229                 start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
2230                         ex.fe_start;
2231                 /* use do_div to get remainder (would be 64-bit modulo) */
2232                 if (do_div(start, sbi->s_stripe) == 0) {
2233                         ac->ac_found++;
2234                         ac->ac_b_ex = ex;
2235                         ext4_mb_use_best_found(ac, e4b);
2236                 }
2237         } else if (max >= ac->ac_g_ex.fe_len) {
2238                 BUG_ON(ex.fe_len <= 0);
2239                 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
2240                 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
2241                 ac->ac_found++;
2242                 ac->ac_b_ex = ex;
2243                 ext4_mb_use_best_found(ac, e4b);
2244         } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
                /* Sometimes the caller may want to merge even a small
                 * number of blocks into an existing extent */
2247                 BUG_ON(ex.fe_len <= 0);
2248                 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
2249                 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
2250                 ac->ac_found++;
2251                 ac->ac_b_ex = ex;
2252                 ext4_mb_use_best_found(ac, e4b);
2253         }
2254 out:
2255         ext4_unlock_group(ac->ac_sb, group);
2256         ext4_mb_unload_buddy(e4b);
2257
2258         return 0;
2259 }
2260
2261 /*
 * The routine scans the buddy structures (not the bitmap!) from the
 * given order up to the max order and tries to find a big enough chunk
 * to satisfy the request
2264  */
2265 static noinline_for_stack
2266 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
2267                                         struct ext4_buddy *e4b)
2268 {
2269         struct super_block *sb = ac->ac_sb;
2270         struct ext4_group_info *grp = e4b->bd_info;
2271         void *buddy;
2272         int i;
2273         int k;
2274         int max;
2275
2276         BUG_ON(ac->ac_2order <= 0);
2277         for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) {
2278                 if (grp->bb_counters[i] == 0)
2279                         continue;
2280
2281                 buddy = mb_find_buddy(e4b, i, &max);
2282                 BUG_ON(buddy == NULL);
2283
2284                 k = mb_find_next_zero_bit(buddy, max, 0);
2285                 if (k >= max) {
2286                         ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
2287                                 "%d free clusters of order %d. But found 0",
2288                                 grp->bb_counters[i], i);
2289                         ext4_mark_group_bitmap_corrupted(ac->ac_sb,
2290                                          e4b->bd_group,
2291                                         EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2292                         break;
2293                 }
2294                 ac->ac_found++;
2295
2296                 ac->ac_b_ex.fe_len = 1 << i;
2297                 ac->ac_b_ex.fe_start = k << i;
2298                 ac->ac_b_ex.fe_group = e4b->bd_group;
2299
2300                 ext4_mb_use_best_found(ac, e4b);
2301
2302                 BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len);
2303
2304                 if (EXT4_SB(sb)->s_mb_stats)
2305                         atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
2306
2307                 break;
2308         }
2309 }
2310
2311 /*
 * The routine scans the group and measures all found extents.
 * To bound the scan, it uses the number of free clusters recorded
 * in the group info (bb_free) as the upper limit.
2315  */
2316 static noinline_for_stack
2317 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
2318                                         struct ext4_buddy *e4b)
2319 {
2320         struct super_block *sb = ac->ac_sb;
2321         void *bitmap = e4b->bd_bitmap;
2322         struct ext4_free_extent ex;
2323         int i;
2324         int free;
2325
2326         free = e4b->bd_info->bb_free;
2327         if (WARN_ON(free <= 0))
2328                 return;
2329
2330         i = e4b->bd_info->bb_first_free;
2331
2332         while (free && ac->ac_status == AC_STATUS_CONTINUE) {
2333                 i = mb_find_next_zero_bit(bitmap,
2334                                                 EXT4_CLUSTERS_PER_GROUP(sb), i);
2335                 if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
2336                         /*
                         * If we have a corrupt bitmap, we won't find any
                         * free blocks even though the group info says we
                         * have free blocks
2340                          */
2341                         ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2342                                         "%d free clusters as per "
2343                                         "group info. But bitmap says 0",
2344                                         free);
2345                         ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2346                                         EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2347                         break;
2348                 }
2349
2350                 mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
2351                 if (WARN_ON(ex.fe_len <= 0))
2352                         break;
2353                 if (free < ex.fe_len) {
2354                         ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2355                                         "%d free clusters as per "
2356                                         "group info. But got %d blocks",
2357                                         free, ex.fe_len);
2358                         ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2359                                         EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2360                         /*
                         * The number of free blocks differs. This most
                         * likely indicates that the bitmap is corrupt.
                         * So exit without claiming the space.
2364                          */
2365                         break;
2366                 }
2367                 ex.fe_logical = 0xDEADC0DE; /* debug value */
2368                 ext4_mb_measure_extent(ac, &ex, e4b);
2369
2370                 i += ex.fe_len;
2371                 free -= ex.fe_len;
2372         }
2373
2374         ext4_mb_check_limits(ac, e4b, 1);
2375 }
2376
2377 /*
 * This is a special case for storage like RAID5: we try to find
 * stripe-aligned chunks for stripe-size-multiple requests
2380  */
2381 static noinline_for_stack
2382 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
2383                                  struct ext4_buddy *e4b)
2384 {
2385         struct super_block *sb = ac->ac_sb;
2386         struct ext4_sb_info *sbi = EXT4_SB(sb);
2387         void *bitmap = e4b->bd_bitmap;
2388         struct ext4_free_extent ex;
2389         ext4_fsblk_t first_group_block;
2390         ext4_fsblk_t a;
2391         ext4_grpblk_t i;
2392         int max;
2393
2394         BUG_ON(sbi->s_stripe == 0);
2395
2396         /* find first stripe-aligned block in group */
2397         first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
2398
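        /*
         * Round first_group_block up to a multiple of s_stripe, then
         * convert back to a group-relative block. As an illustrative
         * example, first_group_block = 1000 and s_stripe = 16 give
         * a = 1015 / 16 = 63, so i = 63 * 16 - 1000 = 8, i.e. absolute
         * block 1008, the first stripe boundary in this group.
         */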
2399         a = first_group_block + sbi->s_stripe - 1;
2400         do_div(a, sbi->s_stripe);
2401         i = (a * sbi->s_stripe) - first_group_block;
2402
2403         while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
2404                 if (!mb_test_bit(i, bitmap)) {
2405                         max = mb_find_extent(e4b, i, sbi->s_stripe, &ex);
2406                         if (max >= sbi->s_stripe) {
2407                                 ac->ac_found++;
2408                                 ex.fe_logical = 0xDEADF00D; /* debug value */
2409                                 ac->ac_b_ex = ex;
2410                                 ext4_mb_use_best_found(ac, e4b);
2411                                 break;
2412                         }
2413                 }
2414                 i += sbi->s_stripe;
2415         }
2416 }
2417
2418 /*
 * This is also called BEFORE we load the buddy bitmap.
 * Returns true if the group is suitable for the allocation,
 * false otherwise.
2422  */
2423 static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
2424                                 ext4_group_t group, int cr)
2425 {
2426         ext4_grpblk_t free, fragments;
2427         int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
2428         struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2429
2430         BUG_ON(cr < 0 || cr >= 4);
2431
2432         if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2433                 return false;
2434
2435         free = grp->bb_free;
2436         if (free == 0)
2437                 return false;
2438
2439         fragments = grp->bb_fragments;
2440         if (fragments == 0)
2441                 return false;
2442
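        /*
         * The criteria below get progressively less picky: cr=0 wants
         * a group that can serve the request from a single buddy order,
         * cr=1 wants the average free fragment to be large enough,
         * cr=2 just wants enough free clusters in total, and cr=3
         * accepts any group with free space.
         */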
2443         switch (cr) {
2444         case 0:
2445                 BUG_ON(ac->ac_2order == 0);
2446
2447                 /* Avoid using the first bg of a flexgroup for data files */
2448                 if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
2449                     (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
2450                     ((group % flex_size) == 0))
2451                         return false;
2452
2453                 if (free < ac->ac_g_ex.fe_len)
2454                         return false;
2455
2456                 if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb))
2457                         return true;
2458
2459                 if (grp->bb_largest_free_order < ac->ac_2order)
2460                         return false;
2461
2462                 return true;
2463         case 1:
2464                 if ((free / fragments) >= ac->ac_g_ex.fe_len)
2465                         return true;
2466                 break;
2467         case 2:
2468                 if (free >= ac->ac_g_ex.fe_len)
2469                         return true;
2470                 break;
2471         case 3:
2472                 return true;
2473         default:
2474                 BUG();
2475         }
2476
2477         return false;
2478 }
2479
2480 /*
2481  * This could return negative error code if something goes wrong
2482  * during ext4_mb_init_group(). This should not be called with
2483  * ext4_lock_group() held.
2484  *
2485  * Note: because we are conditionally operating with the group lock in
2486  * the EXT4_MB_STRICT_CHECK case, we need to fake out sparse in this
2487  * function using __acquire and __release.  This means we need to be
2488  * super careful before messing with the error path handling via "goto
2489  * out"!
2490  */
2491 static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
2492                                      ext4_group_t group, int cr)
2493 {
2494         struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2495         struct super_block *sb = ac->ac_sb;
2496         struct ext4_sb_info *sbi = EXT4_SB(sb);
2497         bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK;
2498         ext4_grpblk_t free;
2499         int ret = 0;
2500
2501         if (!grp)
2502                 return -EFSCORRUPTED;
2503         if (sbi->s_mb_stats)
2504                 atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]);
2505         if (should_lock) {
2506                 ext4_lock_group(sb, group);
2507                 __release(ext4_group_lock_ptr(sb, group));
2508         }
2509         free = grp->bb_free;
2510         if (free == 0)
2511                 goto out;
2512         if (cr <= 2 && free < ac->ac_g_ex.fe_len)
2513                 goto out;
2514         if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2515                 goto out;
2516         if (should_lock) {
2517                 __acquire(ext4_group_lock_ptr(sb, group));
2518                 ext4_unlock_group(sb, group);
2519         }
2520
2521         /* We only do this if the grp has never been initialized */
2522         if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
2523                 struct ext4_group_desc *gdp =
2524                         ext4_get_group_desc(sb, group, NULL);
2525                 int ret;
2526
2527                 /* cr=0/1 is a very optimistic search to find large
2528                  * good chunks almost for free.  If buddy data is not
2529                  * ready, then this optimization makes no sense.  But
2530                  * we never skip the first block group in a flex_bg,
2531                  * since this gets used for metadata block allocation,
2532                  * and we want to make sure we locate metadata blocks
2533                  * in the first block group in the flex_bg if possible.
2534                  */
2535                 if (cr < 2 &&
2536                     (!sbi->s_log_groups_per_flex ||
2537                      ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) &&
2538                     !(ext4_has_group_desc_csum(sb) &&
2539                       (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))))
2540                         return 0;
2541                 ret = ext4_mb_init_group(sb, group, GFP_NOFS);
2542                 if (ret)
2543                         return ret;
2544         }
2545
2546         if (should_lock) {
2547                 ext4_lock_group(sb, group);
2548                 __release(ext4_group_lock_ptr(sb, group));
2549         }
2550         ret = ext4_mb_good_group(ac, group, cr);
2551 out:
2552         if (should_lock) {
2553                 __acquire(ext4_group_lock_ptr(sb, group));
2554                 ext4_unlock_group(sb, group);
2555         }
2556         return ret;
2557 }
2558
2559 /*
2560  * Start prefetching @nr block bitmaps starting at @group.
2561  * Return the next group which needs to be prefetched.
2562  */
2563 ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
2564                               unsigned int nr, int *cnt)
2565 {
2566         ext4_group_t ngroups = ext4_get_groups_count(sb);
2567         struct buffer_head *bh;
2568         struct blk_plug plug;
2569
2570         blk_start_plug(&plug);
2571         while (nr-- > 0) {
2572                 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
2573                                                                   NULL);
2574                 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
2575
2576                 /*
                 * Prefetch block groups with free blocks; but don't
                 * bother if the group is marked uninitialized on disk,
                 * since it won't require I/O to read.  Also only try to
                 * prefetch once, so we avoid the getblk() call, which
                 * can be expensive.
2582                  */
2583                 if (gdp && grp && !EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
2584                     EXT4_MB_GRP_NEED_INIT(grp) &&
2585                     ext4_free_group_clusters(sb, gdp) > 0 &&
2586                     !(ext4_has_group_desc_csum(sb) &&
2587                       (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
2588                         bh = ext4_read_block_bitmap_nowait(sb, group, true);
2589                         if (bh && !IS_ERR(bh)) {
2590                                 if (!buffer_uptodate(bh) && cnt)
2591                                         (*cnt)++;
2592                                 brelse(bh);
2593                         }
2594                 }
2595                 if (++group >= ngroups)
2596                         group = 0;
2597         }
2598         blk_finish_plug(&plug);
2599         return group;
2600 }
2601
2602 /*
2603  * Prefetching reads the block bitmap into the buffer cache; but we
2604  * need to make sure that the buddy bitmap in the page cache has been
2605  * initialized.  Note that ext4_mb_init_group() will block if the I/O
 * is not yet completed, or indeed if ext4_mb_prefetch() did not
 * start the I/O at all.
2608  *
2609  * TODO: We should actually kick off the buddy bitmap setup in a work
2610  * queue when the buffer I/O is completed, so that we don't block
2611  * waiting for the block allocation bitmap read to finish when
2612  * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator().
2613  */
2614 void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
2615                            unsigned int nr)
2616 {
2617         while (nr-- > 0) {
                struct ext4_group_desc *gdp;
                struct ext4_group_info *grp;

                /* walk backwards from @group, wrapping around past group 0 */
                if (!group)
                        group = ext4_get_groups_count(sb);
                group--;
                /* both lookups must use the decremented group */
                gdp = ext4_get_group_desc(sb, group, NULL);
                grp = ext4_get_group_info(sb, group);
2626
2627                 if (grp && gdp && EXT4_MB_GRP_NEED_INIT(grp) &&
2628                     ext4_free_group_clusters(sb, gdp) > 0 &&
2629                     !(ext4_has_group_desc_csum(sb) &&
2630                       (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
2631                         if (ext4_mb_init_group(sb, group, GFP_NOFS))
2632                                 break;
2633                 }
2634         }
2635 }
2636
2637 static noinline_for_stack int
2638 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
2639 {
2640         ext4_group_t prefetch_grp = 0, ngroups, group, i;
2641         int cr = -1, new_cr;
2642         int err = 0, first_err = 0;
2643         unsigned int nr = 0, prefetch_ios = 0;
2644         struct ext4_sb_info *sbi;
2645         struct super_block *sb;
2646         struct ext4_buddy e4b;
2647         int lost;
2648
2649         sb = ac->ac_sb;
2650         sbi = EXT4_SB(sb);
2651         ngroups = ext4_get_groups_count(sb);
2652         /* non-extent files are limited to low blocks/groups */
2653         if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
2654                 ngroups = sbi->s_blockfile_groups;
2655
2656         BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2657
2658         /* first, try the goal */
2659         err = ext4_mb_find_by_goal(ac, &e4b);
2660         if (err || ac->ac_status == AC_STATUS_FOUND)
2661                 goto out;
2662
2663         if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2664                 goto out;
2665
2666         /*
         * ac->ac_2order is set only if fe_len is a power of 2;
         * if it is set, we also set the criteria to 0 so that we
         * try an exact allocation using the buddy data.
2670          */
2671         i = fls(ac->ac_g_ex.fe_len);
2672         ac->ac_2order = 0;
2673         /*
         * We search using buddy data only if the order of the request
         * is greater than or equal to sbi->s_mb_order2_reqs.
         * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req.
         * We also support searching for power-of-two requests only up
         * to the maximum buddy size we have constructed.
2679          */
2680         if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) {
2681                 /*
                 * This should tell if fe_len is exactly a power of 2
2683                  */
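                /*
                 * For example, fe_len = 8 gives i = fls(8) = 4, and
                 * 8 & ~(1 << 3) == 0, so ac_2order becomes 3.
                 */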
2684                 if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
2685                         ac->ac_2order = array_index_nospec(i - 1,
2686                                                            MB_NUM_ORDERS(sb));
2687         }
2688
2689         /* if stream allocation is enabled, use global goal */
2690         if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
                /* TBD: may be a hot spot */
2692                 spin_lock(&sbi->s_md_lock);
2693                 ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2694                 ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2695                 spin_unlock(&sbi->s_md_lock);
2696         }
2697
        /* Let's just scan groups to find more or less suitable blocks */
2699         cr = ac->ac_2order ? 0 : 1;
2700         /*
         * cr == 0 tries to get an exact allocation;
         * cr == 3 takes anything
2703          */
2704 repeat:
2705         for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2706                 ac->ac_criteria = cr;
2707                 /*
                 * start searching for the right group from
                 * the specified goal value
2710                  */
2711                 group = ac->ac_g_ex.fe_group;
2712                 ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups;
2713                 prefetch_grp = group;
2714
2715                 for (i = 0, new_cr = cr; i < ngroups; i++,
2716                      ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) {
2717                         int ret = 0;
2718
2719                         cond_resched();
2720                         if (new_cr != cr) {
2721                                 cr = new_cr;
2722                                 goto repeat;
2723                         }
2724
2725                         /*
2726                          * Batch reads of the block allocation bitmaps
2727                          * to get multiple READs in flight; limit
2728                          * prefetching at cr=0/1, otherwise mballoc can
2729                          * spend a lot of time loading imperfect groups
2730                          */
2731                         if ((prefetch_grp == group) &&
2732                             (cr > 1 ||
2733                              prefetch_ios < sbi->s_mb_prefetch_limit)) {
2734                                 unsigned int curr_ios = prefetch_ios;
2735
2736                                 nr = sbi->s_mb_prefetch;
2737                                 if (ext4_has_feature_flex_bg(sb)) {
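                                        /*
                                         * Trim the prefetch window so it
                                         * stops at the end of the current
                                         * flex_bg: group & (nr - 1) is
                                         * this group's offset within its
                                         * flex group.
                                         */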
2738                                         nr = 1 << sbi->s_log_groups_per_flex;
2739                                         nr -= group & (nr - 1);
2740                                         nr = min(nr, sbi->s_mb_prefetch);
2741                                 }
2742                                 prefetch_grp = ext4_mb_prefetch(sb, group,
2743                                                         nr, &prefetch_ios);
2744                                 if (prefetch_ios == curr_ios)
2745                                         nr = 0;
2746                         }
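                        /*
                         * Worked example (illustrative): with
                         * s_log_groups_per_flex == 4 a flex group spans 16
                         * groups, so for group 35 we get
                         * nr = 16 - (35 & 15) = 13, i.e. prefetching stops
                         * at the flex group boundary (further capped by
                         * s_mb_prefetch).
                         */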
2747
2748                         /* This now checks without needing the buddy page */
2749                         ret = ext4_mb_good_group_nolock(ac, group, cr);
2750                         if (ret <= 0) {
2751                                 if (!first_err)
2752                                         first_err = ret;
2753                                 continue;
2754                         }
2755
2756                         err = ext4_mb_load_buddy(sb, group, &e4b);
2757                         if (err)
2758                                 goto out;
2759
2760                         ext4_lock_group(sb, group);
2761
2762                         /*
2763                          * We need to check again after locking the
2764                          * block group
2765                          */
2766                         ret = ext4_mb_good_group(ac, group, cr);
2767                         if (ret == 0) {
2768                                 ext4_unlock_group(sb, group);
2769                                 ext4_mb_unload_buddy(&e4b);
2770                                 continue;
2771                         }
2772
2773                         ac->ac_groups_scanned++;
2774                         if (cr == 0)
2775                                 ext4_mb_simple_scan_group(ac, &e4b);
2776                         else if (cr == 1 && sbi->s_stripe &&
2777                                         !(ac->ac_g_ex.fe_len % sbi->s_stripe))
2778                                 ext4_mb_scan_aligned(ac, &e4b);
2779                         else
2780                                 ext4_mb_complex_scan_group(ac, &e4b);
2781
2782                         ext4_unlock_group(sb, group);
2783                         ext4_mb_unload_buddy(&e4b);
2784
2785                         if (ac->ac_status != AC_STATUS_CONTINUE)
2786                                 break;
2787                 }
2788                 /* Processed all groups and haven't found blocks */
2789                 if (sbi->s_mb_stats && i == ngroups)
2790                         atomic64_inc(&sbi->s_bal_cX_failed[cr]);
2791         }
2792
2793         if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2794             !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2795                 /*
2796                  * We've been searching too long. Let's try to allocate
2797                  * the best chunk we've found so far
2798                  */
2799                 ext4_mb_try_best_found(ac, &e4b);
2800                 if (ac->ac_status != AC_STATUS_FOUND) {
2801                         /*
2802                          * Someone luckier has already allocated it.
2803                          * The only thing we can do is just take the
2804                          * first found block(s)
2805                          */
2806                         lost = atomic_inc_return(&sbi->s_mb_lost_chunks);
2807                         mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n",
2808                                  ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start,
2809                                  ac->ac_b_ex.fe_len, lost);
2810
2811                         ac->ac_b_ex.fe_group = 0;
2812                         ac->ac_b_ex.fe_start = 0;
2813                         ac->ac_b_ex.fe_len = 0;
2814                         ac->ac_status = AC_STATUS_CONTINUE;
2815                         ac->ac_flags |= EXT4_MB_HINT_FIRST;
2816                         cr = 3;
2817                         goto repeat;
2818                 }
2819         }
2820
2821         if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND)
2822                 atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]);
2823 out:
2824         if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
2825                 err = first_err;
2826
2827         mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n",
2828                  ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
2829                  ac->ac_flags, cr, err);
2830
2831         if (nr)
2832                 ext4_mb_prefetch_fini(sb, prefetch_grp, nr);
2833
2834         return err;
2835 }
2836
2837 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2838 {
2839         struct super_block *sb = pde_data(file_inode(seq->file));
2840         ext4_group_t group;
2841
2842         if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2843                 return NULL;
2844         group = *pos + 1;
2845         return (void *) ((unsigned long) group);
2846 }
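/*
 * Note: the iterators below encode the position as group + 1 so that group 0
 * is never returned as NULL, which seq_file would take to mean end of
 * sequence; ext4_mb_seq_groups_show() undoes the offset with group--.
 */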
2847
2848 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2849 {
2850         struct super_block *sb = pde_data(file_inode(seq->file));
2851         ext4_group_t group;
2852
2853         ++*pos;
2854         if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2855                 return NULL;
2856         group = *pos + 1;
2857         return (void *) ((unsigned long) group);
2858 }
2859
2860 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2861 {
2862         struct super_block *sb = pde_data(file_inode(seq->file));
2863         ext4_group_t group = (ext4_group_t) ((unsigned long) v);
2864         int i;
2865         int err, buddy_loaded = 0;
2866         struct ext4_buddy e4b;
2867         struct ext4_group_info *grinfo;
2868         unsigned char blocksize_bits = min_t(unsigned char,
2869                                              sb->s_blocksize_bits,
2870                                              EXT4_MAX_BLOCK_LOG_SIZE);
2871         struct sg {
2872                 struct ext4_group_info info;
2873                 ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
2874         } sg;
2875
2876         group--;
2877         if (group == 0)
2878                 seq_puts(seq, "#group: free  frags first ["
2879                               " 2^0   2^1   2^2   2^3   2^4   2^5   2^6  "
2880                               " 2^7   2^8   2^9   2^10  2^11  2^12  2^13  ]\n");
2881
2882         i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2883                 sizeof(struct ext4_group_info);
2884
2885         grinfo = ext4_get_group_info(sb, group);
2886         if (!grinfo)
2887                 return 0;
2888         /* Load the group info in memory only if not already loaded. */
2889         if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
2890                 err = ext4_mb_load_buddy(sb, group, &e4b);
2891                 if (err) {
2892                         seq_printf(seq, "#%-5u: I/O error\n", group);
2893                         return 0;
2894                 }
2895                 buddy_loaded = 1;
2896         }
2897
2898         memcpy(&sg, grinfo, i);
2899
2900         if (buddy_loaded)
2901                 ext4_mb_unload_buddy(&e4b);
2902
2903         seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
2904                         sg.info.bb_fragments, sg.info.bb_first_free);
2905         for (i = 0; i <= 13; i++)
2906                 seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
2907                                 sg.info.bb_counters[i] : 0);
2908         seq_puts(seq, " ]");
2909         if (EXT4_MB_GRP_BBITMAP_CORRUPT(&sg.info))
2910                 seq_puts(seq, " Block bitmap corrupted!");
2911         seq_puts(seq, "\n");
2912
2913         return 0;
2914 }
2915
2916 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2917 {
2918 }
2919
2920 const struct seq_operations ext4_mb_seq_groups_ops = {
2921         .start  = ext4_mb_seq_groups_start,
2922         .next   = ext4_mb_seq_groups_next,
2923         .stop   = ext4_mb_seq_groups_stop,
2924         .show   = ext4_mb_seq_groups_show,
2925 };
2926
2927 int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset)
2928 {
2929         struct super_block *sb = seq->private;
2930         struct ext4_sb_info *sbi = EXT4_SB(sb);
2931
2932         seq_puts(seq, "mballoc:\n");
2933         if (!sbi->s_mb_stats) {
2934                 seq_puts(seq, "\tmb stats collection turned off.\n");
2935                 seq_puts(seq, "\tTo enable, please write \"1\" to sysfs file mb_stats.\n");
2936                 return 0;
2937         }
2938         seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs));
2939         seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success));
2940
2941         seq_printf(seq, "\tgroups_scanned: %u\n",  atomic_read(&sbi->s_bal_groups_scanned));
2942
2943         seq_puts(seq, "\tcr0_stats:\n");
2944         seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[0]));
2945         seq_printf(seq, "\t\tgroups_considered: %llu\n",
2946                    atomic64_read(&sbi->s_bal_cX_groups_considered[0]));
2947         seq_printf(seq, "\t\tuseless_loops: %llu\n",
2948                    atomic64_read(&sbi->s_bal_cX_failed[0]));
2949         seq_printf(seq, "\t\tbad_suggestions: %u\n",
2950                    atomic_read(&sbi->s_bal_cr0_bad_suggestions));
2951
2952         seq_puts(seq, "\tcr1_stats:\n");
2953         seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[1]));
2954         seq_printf(seq, "\t\tgroups_considered: %llu\n",
2955                    atomic64_read(&sbi->s_bal_cX_groups_considered[1]));
2956         seq_printf(seq, "\t\tuseless_loops: %llu\n",
2957                    atomic64_read(&sbi->s_bal_cX_failed[1]));
2958         seq_printf(seq, "\t\tbad_suggestions: %u\n",
2959                    atomic_read(&sbi->s_bal_cr1_bad_suggestions));
2960
2961         seq_puts(seq, "\tcr2_stats:\n");
2962         seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[2]));
2963         seq_printf(seq, "\t\tgroups_considered: %llu\n",
2964                    atomic64_read(&sbi->s_bal_cX_groups_considered[2]));
2965         seq_printf(seq, "\t\tuseless_loops: %llu\n",
2966                    atomic64_read(&sbi->s_bal_cX_failed[2]));
2967
2968         seq_puts(seq, "\tcr3_stats:\n");
2969         seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[3]));
2970         seq_printf(seq, "\t\tgroups_considered: %llu\n",
2971                    atomic64_read(&sbi->s_bal_cX_groups_considered[3]));
2972         seq_printf(seq, "\t\tuseless_loops: %llu\n",
2973                    atomic64_read(&sbi->s_bal_cX_failed[3]));
2974         seq_printf(seq, "\textents_scanned: %u\n", atomic_read(&sbi->s_bal_ex_scanned));
2975         seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals));
2976         seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders));
2977         seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks));
2978         seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks));
2979
2980         seq_printf(seq, "\tbuddies_generated: %u/%u\n",
2981                    atomic_read(&sbi->s_mb_buddies_generated),
2982                    ext4_get_groups_count(sb));
2983         seq_printf(seq, "\tbuddies_time_used: %llu\n",
2984                    atomic64_read(&sbi->s_mb_generation_time));
2985         seq_printf(seq, "\tpreallocated: %u\n",
2986                    atomic_read(&sbi->s_mb_preallocated));
2987         seq_printf(seq, "\tdiscarded: %u\n",
2988                    atomic_read(&sbi->s_mb_discarded));
2989         return 0;
2990 }
2991
2992 static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos)
2994 {
2995         struct super_block *sb = pde_data(file_inode(seq->file));
2996         unsigned long position;
2997
2998         if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
2999                 return NULL;
3000         position = *pos + 1;
3001         return (void *) ((unsigned long) position);
3002 }
3003
3004 static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos)
3005 {
3006         struct super_block *sb = pde_data(file_inode(seq->file));
3007         unsigned long position;
3008
3009         ++*pos;
3010         if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
3011                 return NULL;
3012         position = *pos + 1;
3013         return (void *) ((unsigned long) position);
3014 }
3015
3016 static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
3017 {
3018         struct super_block *sb = pde_data(file_inode(seq->file));
3019         struct ext4_sb_info *sbi = EXT4_SB(sb);
3020         unsigned long position = ((unsigned long) v);
3021         struct ext4_group_info *grp;
3022         unsigned int count;
3023
3024         position--;
3025         if (position >= MB_NUM_ORDERS(sb)) {
3026                 position -= MB_NUM_ORDERS(sb);
3027                 if (position == 0)
3028                         seq_puts(seq, "avg_fragment_size_lists:\n");
3029
3030                 count = 0;
3031                 read_lock(&sbi->s_mb_avg_fragment_size_locks[position]);
3032                 list_for_each_entry(grp, &sbi->s_mb_avg_fragment_size[position],
3033                                     bb_avg_fragment_size_node)
3034                         count++;
3035                 read_unlock(&sbi->s_mb_avg_fragment_size_locks[position]);
3036                 seq_printf(seq, "\tlist_order_%u_groups: %u\n",
3037                                         (unsigned int)position, count);
3038                 return 0;
3039         }
3040
3041         if (position == 0) {
3042                 seq_printf(seq, "optimize_scan: %d\n",
3043                            test_opt2(sb, MB_OPTIMIZE_SCAN) ? 1 : 0);
3044                 seq_puts(seq, "max_free_order_lists:\n");
3045         }
3046         count = 0;
3047         read_lock(&sbi->s_mb_largest_free_orders_locks[position]);
3048         list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position],
3049                             bb_largest_free_order_node)
3050                 count++;
3051         read_unlock(&sbi->s_mb_largest_free_orders_locks[position]);
3052         seq_printf(seq, "\tlist_order_%u_groups: %u\n",
3053                    (unsigned int)position, count);
3054
3055         return 0;
3056 }
3057
3058 static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v)
3059 {
3060 }
3061
3062 const struct seq_operations ext4_mb_seq_structs_summary_ops = {
3063         .start  = ext4_mb_seq_structs_summary_start,
3064         .next   = ext4_mb_seq_structs_summary_next,
3065         .stop   = ext4_mb_seq_structs_summary_stop,
3066         .show   = ext4_mb_seq_structs_summary_show,
3067 };
3068
3069 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
3070 {
3071         int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
3072         struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
3073
3074         BUG_ON(!cachep);
3075         return cachep;
3076 }
3077
3078 /*
3079  * Allocate the top-level s_group_info array for the specified number
3080  * of groups
3081  */
3082 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
3083 {
3084         struct ext4_sb_info *sbi = EXT4_SB(sb);
3085         unsigned size;
3086         struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
3087
3088         size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
3089                 EXT4_DESC_PER_BLOCK_BITS(sb);
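        /*
         * Illustrative sizing, assuming 4k blocks with 64-byte group
         * descriptors (EXT4_DESC_PER_BLOCK == 64): a filesystem with 8192
         * block groups (1 TiB at 128 MiB per group) needs
         * 8192 / 64 = 128 second-level pointer tables.
         */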
3090         if (size <= sbi->s_group_info_size)
3091                 return 0;
3092
3093         size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
3094         new_groupinfo = kvzalloc(size, GFP_KERNEL);
3095         if (!new_groupinfo) {
3096                 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
3097                 return -ENOMEM;
3098         }
3099         rcu_read_lock();
3100         old_groupinfo = rcu_dereference(sbi->s_group_info);
3101         if (old_groupinfo)
3102                 memcpy(new_groupinfo, old_groupinfo,
3103                        sbi->s_group_info_size * sizeof(*sbi->s_group_info));
3104         rcu_read_unlock();
3105         rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
3106         sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
3107         if (old_groupinfo)
3108                 ext4_kvfree_array_rcu(old_groupinfo);
3109         ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
3110                    sbi->s_group_info_size);
3111         return 0;
3112 }
3113
3114 /* Create and initialize ext4_group_info data for the given group. */
3115 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
3116                           struct ext4_group_desc *desc)
3117 {
3118         int i;
3119         int metalen = 0;
3120         int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
3121         struct ext4_sb_info *sbi = EXT4_SB(sb);
3122         struct ext4_group_info **meta_group_info;
3123         struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3124
3125         /*
3126          * First check if this group is the first of a reserved block;
3127          * if so, we have to allocate a new table of pointers
3128          * to ext4_group_info structures
3129          */
3130         if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
3131                 metalen = sizeof(*meta_group_info) <<
3132                         EXT4_DESC_PER_BLOCK_BITS(sb);
3133                 meta_group_info = kmalloc(metalen, GFP_NOFS);
3134                 if (meta_group_info == NULL) {
3135                         ext4_msg(sb, KERN_ERR, "can't allocate mem "
3136                                  "for a buddy group");
3137                         goto exit_meta_group_info;
3138                 }
3139                 rcu_read_lock();
3140                 rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
3141                 rcu_read_unlock();
3142         }
3143
3144         meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
3145         i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
3146
3147         meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
3148         if (meta_group_info[i] == NULL) {
3149                 ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
3150                 goto exit_group_info;
3151         }
3152         set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
3153                 &(meta_group_info[i]->bb_state));
3154
3155         /*
3156          * initialize bb_free to be able to skip
3157          * empty groups without initialization
3158          */
3159         if (ext4_has_group_desc_csum(sb) &&
3160             (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3161                 meta_group_info[i]->bb_free =
3162                         ext4_free_clusters_after_init(sb, group, desc);
3163         } else {
3164                 meta_group_info[i]->bb_free =
3165                         ext4_free_group_clusters(sb, desc);
3166         }
3167
3168         INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
3169         init_rwsem(&meta_group_info[i]->alloc_sem);
3170         meta_group_info[i]->bb_free_root = RB_ROOT;
3171         INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node);
3172         INIT_LIST_HEAD(&meta_group_info[i]->bb_avg_fragment_size_node);
3173         meta_group_info[i]->bb_largest_free_order = -1;  /* uninit */
3174         meta_group_info[i]->bb_avg_fragment_size_order = -1;  /* uninit */
3175         meta_group_info[i]->bb_group = group;
3176
3177         mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group);
3178         return 0;
3179
3180 exit_group_info:
3181         /* If a meta_group_info table has been allocated, release it now */
3182         if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
3183                 struct ext4_group_info ***group_info;
3184
3185                 rcu_read_lock();
3186                 group_info = rcu_dereference(sbi->s_group_info);
3187                 kfree(group_info[idx]);
3188                 group_info[idx] = NULL;
3189                 rcu_read_unlock();
3190         }
3191 exit_meta_group_info:
3192         return -ENOMEM;
3193 } /* ext4_mb_add_groupinfo */
3194
3195 static int ext4_mb_init_backend(struct super_block *sb)
3196 {
3197         ext4_group_t ngroups = ext4_get_groups_count(sb);
3198         ext4_group_t i;
3199         struct ext4_sb_info *sbi = EXT4_SB(sb);
3200         int err;
3201         struct ext4_group_desc *desc;
3202         struct ext4_group_info ***group_info;
3203         struct kmem_cache *cachep;
3204
3205         err = ext4_mb_alloc_groupinfo(sb, ngroups);
3206         if (err)
3207                 return err;
3208
3209         sbi->s_buddy_cache = new_inode(sb);
3210         if (sbi->s_buddy_cache == NULL) {
3211                 ext4_msg(sb, KERN_ERR, "can't get new inode");
3212                 goto err_freesgi;
3213         }
3214         /* To avoid potentially colliding with a valid on-disk inode number,
3215          * use EXT4_BAD_INO for the buddy cache inode number.  This inode is
3216          * not in the inode hash, so it should never be found by iget(), but
3217          * this will avoid confusion if it ever shows up during debugging. */
3218         sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
3219         EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
3220         for (i = 0; i < ngroups; i++) {
3221                 cond_resched();
3222                 desc = ext4_get_group_desc(sb, i, NULL);
3223                 if (desc == NULL) {
3224                         ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
3225                         goto err_freebuddy;
3226                 }
3227                 if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
3228                         goto err_freebuddy;
3229         }
3230
3231         if (ext4_has_feature_flex_bg(sb)) {
3232                 /* a single flex group is supposed to be read by a single IO.
3233                  * Since s_mb_prefetch is a 32-bit unsigned integer, a shift
3234                  * of 32 or more would overflow, so such values are rejected.
3235                  */
3236                 if (sbi->s_es->s_log_groups_per_flex >= 32) {
3237                         ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
3238                         goto err_freebuddy;
3239                 }
3240                 sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
3241                         BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
3242                 sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
3243         } else {
3244                 sbi->s_mb_prefetch = 32;
3245         }
3246         if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
3247                 sbi->s_mb_prefetch = ext4_get_groups_count(sb);
3248         /* how many real IOs to prefetch within a single allocation at cr=0.
3249          * Given that cr=0 is a CPU-related optimization we shouldn't try to
3250          * load too many groups; at some point we should start to use what
3251          * we've got in memory.
3252          * With an average random access time of 5ms, it'd take a second to
3253          * get 200 groups (* N with flex_bg), so let's make this limit 4
3254          */
3255         sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4;
3256         if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb))
3257                 sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb);
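        /*
         * Example numbers (illustrative, assuming 4k blocks and
         * BLK_MAX_SEGMENT_SIZE of 64k): with s_log_groups_per_flex == 4,
         * s_mb_prefetch = min(16, 65536 >> 3) * 8 = 128 groups and
         * s_mb_prefetch_limit = 512, both clamped to the group count.
         */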
3258
3259         return 0;
3260
3261 err_freebuddy:
3262         cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3263         while (i-- > 0) {
3264                 struct ext4_group_info *grp = ext4_get_group_info(sb, i);
3265
3266                 if (grp)
3267                         kmem_cache_free(cachep, grp);
3268         }
3269         i = sbi->s_group_info_size;
3270         rcu_read_lock();
3271         group_info = rcu_dereference(sbi->s_group_info);
3272         while (i-- > 0)
3273                 kfree(group_info[i]);
3274         rcu_read_unlock();
3275         iput(sbi->s_buddy_cache);
3276 err_freesgi:
3277         rcu_read_lock();
3278         kvfree(rcu_dereference(sbi->s_group_info));
3279         rcu_read_unlock();
3280         return -ENOMEM;
3281 }
3282
3283 static void ext4_groupinfo_destroy_slabs(void)
3284 {
3285         int i;
3286
3287         for (i = 0; i < NR_GRPINFO_CACHES; i++) {
3288                 kmem_cache_destroy(ext4_groupinfo_caches[i]);
3289                 ext4_groupinfo_caches[i] = NULL;
3290         }
3291 }
3292
3293 static int ext4_groupinfo_create_slab(size_t size)
3294 {
3295         static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
3296         int slab_size;
3297         int blocksize_bits = order_base_2(size);
3298         int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
3299         struct kmem_cache *cachep;
3300
3301         if (cache_index >= NR_GRPINFO_CACHES)
3302                 return -EINVAL;
3303
3304         if (unlikely(cache_index < 0))
3305                 cache_index = 0;
3306
3307         mutex_lock(&ext4_grpinfo_slab_create_mutex);
3308         if (ext4_groupinfo_caches[cache_index]) {
3309                 mutex_unlock(&ext4_grpinfo_slab_create_mutex);
3310                 return 0;       /* Already created */
3311         }
3312
3313         slab_size = offsetof(struct ext4_group_info,
3314                                 bb_counters[blocksize_bits + 2]);
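        /*
         * E.g. for a 4k block size (blocksize_bits == 12) this is
         * offsetof(struct ext4_group_info, bb_counters[14]), i.e. the slab
         * holds free-extent counters for buddy orders 0 through 13.
         */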
3315
3316         cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
3317                                         slab_size, 0, SLAB_RECLAIM_ACCOUNT,
3318                                         NULL);
3319
3320         ext4_groupinfo_caches[cache_index] = cachep;
3321
3322         mutex_unlock(&ext4_grpinfo_slab_create_mutex);
3323         if (!cachep) {
3324                 printk(KERN_EMERG
3325                        "EXT4-fs: no memory for groupinfo slab cache\n");
3326                 return -ENOMEM;
3327         }
3328
3329         return 0;
3330 }
3331
3332 static void ext4_discard_work(struct work_struct *work)
3333 {
3334         struct ext4_sb_info *sbi = container_of(work,
3335                         struct ext4_sb_info, s_discard_work);
3336         struct super_block *sb = sbi->s_sb;
3337         struct ext4_free_data *fd, *nfd;
3338         struct ext4_buddy e4b;
3339         struct list_head discard_list;
3340         ext4_group_t grp, load_grp;
3341         int err = 0;
3342
3343         INIT_LIST_HEAD(&discard_list);
3344         spin_lock(&sbi->s_md_lock);
3345         list_splice_init(&sbi->s_discard_list, &discard_list);
3346         spin_unlock(&sbi->s_md_lock);
3347
3348         load_grp = UINT_MAX;
3349         list_for_each_entry_safe(fd, nfd, &discard_list, efd_list) {
3350                 /*
3351                  * If the filesystem is unmounting, there is no memory, or
3352                  * we are suffering from no space, give up on the discard
3353                  */
3354                 if ((sb->s_flags & SB_ACTIVE) && !err &&
3355                     !atomic_read(&sbi->s_retry_alloc_pending)) {
3356                         grp = fd->efd_group;
3357                         if (grp != load_grp) {
3358                                 if (load_grp != UINT_MAX)
3359                                         ext4_mb_unload_buddy(&e4b);
3360
3361                                 err = ext4_mb_load_buddy(sb, grp, &e4b);
3362                                 if (err) {
3363                                         kmem_cache_free(ext4_free_data_cachep, fd);
3364                                         load_grp = UINT_MAX;
3365                                         continue;
3366                                 } else {
3367                                         load_grp = grp;
3368                                 }
3369                         }
3370
3371                         ext4_lock_group(sb, grp);
3372                         ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster,
3373                                                 fd->efd_start_cluster + fd->efd_count - 1, 1);
3374                         ext4_unlock_group(sb, grp);
3375                 }
3376                 kmem_cache_free(ext4_free_data_cachep, fd);
3377         }
3378
3379         if (load_grp != UINT_MAX)
3380                 ext4_mb_unload_buddy(&e4b);
3381 }
3382
3383 int ext4_mb_init(struct super_block *sb)
3384 {
3385         struct ext4_sb_info *sbi = EXT4_SB(sb);
3386         unsigned i, j;
3387         unsigned offset, offset_incr;
3388         unsigned max;
3389         int ret;
3390
3391         i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets);
3392
3393         sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
3394         if (sbi->s_mb_offsets == NULL) {
3395                 ret = -ENOMEM;
3396                 goto out;
3397         }
3398
3399         i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs);
3400         sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
3401         if (sbi->s_mb_maxs == NULL) {
3402                 ret = -ENOMEM;
3403                 goto out;
3404         }
3405
3406         ret = ext4_groupinfo_create_slab(sb->s_blocksize);
3407         if (ret < 0)
3408                 goto out;
3409
3410         /* order 0 is regular bitmap */
3411         sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
3412         sbi->s_mb_offsets[0] = 0;
3413
3414         i = 1;
3415         offset = 0;
3416         offset_incr = 1 << (sb->s_blocksize_bits - 1);
3417         max = sb->s_blocksize << 2;
3418         do {
3419                 sbi->s_mb_offsets[i] = offset;
3420                 sbi->s_mb_maxs[i] = max;
3421                 offset += offset_incr;
3422                 offset_incr = offset_incr >> 1;
3423                 max = max >> 1;
3424                 i++;
3425         } while (i < MB_NUM_ORDERS(sb));
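        /*
         * For 4k blocks this yields (illustrative): s_mb_maxs[0] = 32768
         * bits in the on-disk bitmap, then s_mb_offsets[1] = 0 with 16384
         * order-1 bits, s_mb_offsets[2] = 2048 (bytes) with 8192 order-2
         * bits, and so on, halving each order so the whole buddy fits in
         * a single block.
         */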
3426
3427         sbi->s_mb_avg_fragment_size =
3428                 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
3429                         GFP_KERNEL);
3430         if (!sbi->s_mb_avg_fragment_size) {
3431                 ret = -ENOMEM;
3432                 goto out;
3433         }
3434         sbi->s_mb_avg_fragment_size_locks =
3435                 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
3436                         GFP_KERNEL);
3437         if (!sbi->s_mb_avg_fragment_size_locks) {
3438                 ret = -ENOMEM;
3439                 goto out;
3440         }
3441         for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
3442                 INIT_LIST_HEAD(&sbi->s_mb_avg_fragment_size[i]);
3443                 rwlock_init(&sbi->s_mb_avg_fragment_size_locks[i]);
3444         }
3445         sbi->s_mb_largest_free_orders =
3446                 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
3447                         GFP_KERNEL);
3448         if (!sbi->s_mb_largest_free_orders) {
3449                 ret = -ENOMEM;
3450                 goto out;
3451         }
3452         sbi->s_mb_largest_free_orders_locks =
3453                 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
3454                         GFP_KERNEL);
3455         if (!sbi->s_mb_largest_free_orders_locks) {
3456                 ret = -ENOMEM;
3457                 goto out;
3458         }
3459         for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
3460                 INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]);
3461                 rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]);
3462         }
3463
3464         spin_lock_init(&sbi->s_md_lock);
3465         sbi->s_mb_free_pending = 0;
3466         INIT_LIST_HEAD(&sbi->s_freed_data_list);
3467         INIT_LIST_HEAD(&sbi->s_discard_list);
3468         INIT_WORK(&sbi->s_discard_work, ext4_discard_work);
3469         atomic_set(&sbi->s_retry_alloc_pending, 0);
3470
3471         sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
3472         sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
3473         sbi->s_mb_stats = MB_DEFAULT_STATS;
3474         sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
3475         sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
3476         sbi->s_mb_max_inode_prealloc = MB_DEFAULT_MAX_INODE_PREALLOC;
3477         /*
3478          * The default group preallocation is 512, which for 4k block
3479          * sizes translates to 2 megabytes.  However for bigalloc file
3480          * systems, this is probably too big (i.e, if the cluster size
3481          * is 1 megabyte, then group preallocation size becomes half a
3482          * gigabyte!).  As a default, we will keep a two megabyte
3483          * group pralloc size for cluster sizes up to 64k, and after
3484          * group prealloc size for cluster sizes up to 64k, and after
3485          * 32 clusters.  This translates to 8 megs when the cluster
3486          * size is 256k, and 32 megs when the cluster size is 1 meg,
3487          * which seems reasonable as a default.
3488          */
3489         sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
3490                                        sbi->s_cluster_bits, 32);
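        /*
         * Worked example (illustrative): MB_DEFAULT_GROUP_PREALLOC is 512,
         * so without bigalloc (s_cluster_bits == 0) this is 512 clusters,
         * i.e. 2 megabytes at 4k. With 256k clusters, 512 >> 6 = 8 would be
         * too small, so the max() forces 32 clusters (8 megabytes).
         */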
3491         /*
3492          * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
3493          * to the lowest multiple of s_stripe which is bigger than
3494          * the s_mb_group_prealloc as determined above. We want
3495          * the preallocation size to be an exact multiple of the
3496          * RAID stripe size so that preallocations don't fragment
3497          * the stripes.
3498          */
3499         if (sbi->s_stripe > 1) {
3500                 sbi->s_mb_group_prealloc = roundup(
3501                         sbi->s_mb_group_prealloc, sbi->s_stripe);
3502         }
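        /*
         * E.g. (illustrative) with s_stripe == 24 and a computed prealloc
         * of 32 clusters, roundup(32, 24) yields 48, keeping group
         * preallocations stripe-aligned.
         */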
3503
3504         sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
3505         if (sbi->s_locality_groups == NULL) {
3506                 ret = -ENOMEM;
3507                 goto out;
3508         }
3509         for_each_possible_cpu(i) {
3510                 struct ext4_locality_group *lg;
3511                 lg = per_cpu_ptr(sbi->s_locality_groups, i);
3512                 mutex_init(&lg->lg_mutex);
3513                 for (j = 0; j < PREALLOC_TB_SIZE; j++)
3514                         INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
3515                 spin_lock_init(&lg->lg_prealloc_lock);
3516         }
3517
3518         if (bdev_nonrot(sb->s_bdev))
3519                 sbi->s_mb_max_linear_groups = 0;
3520         else
3521                 sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT;
3522         /* init file for buddy data */
3523         ret = ext4_mb_init_backend(sb);
3524         if (ret != 0)
3525                 goto out_free_locality_groups;
3526
3527         return 0;
3528
3529 out_free_locality_groups:
3530         free_percpu(sbi->s_locality_groups);
3531         sbi->s_locality_groups = NULL;
3532 out:
3533         kfree(sbi->s_mb_avg_fragment_size);
3534         kfree(sbi->s_mb_avg_fragment_size_locks);
3535         kfree(sbi->s_mb_largest_free_orders);
3536         kfree(sbi->s_mb_largest_free_orders_locks);
3537         kfree(sbi->s_mb_offsets);
3538         sbi->s_mb_offsets = NULL;
3539         kfree(sbi->s_mb_maxs);
3540         sbi->s_mb_maxs = NULL;
3541         return ret;
3542 }
3543
3544 /* needs to be called with the ext4 group lock held */
3545 static int ext4_mb_cleanup_pa(struct ext4_group_info *grp)
3546 {
3547         struct ext4_prealloc_space *pa;
3548         struct list_head *cur, *tmp;
3549         int count = 0;
3550
3551         list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
3552                 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3553                 list_del(&pa->pa_group_list);
3554                 count++;
3555                 kmem_cache_free(ext4_pspace_cachep, pa);
3556         }
3557         return count;
3558 }
3559
3560 int ext4_mb_release(struct super_block *sb)
3561 {
3562         ext4_group_t ngroups = ext4_get_groups_count(sb);
3563         ext4_group_t i;
3564         int num_meta_group_infos;
3565         struct ext4_group_info *grinfo, ***group_info;
3566         struct ext4_sb_info *sbi = EXT4_SB(sb);
3567         struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3568         int count;
3569
3570         if (test_opt(sb, DISCARD)) {
3571                 /*
3572                  * wait for the discard work to drain all of the ext4_free_data
3573                  */
3574                 flush_work(&sbi->s_discard_work);
3575                 WARN_ON_ONCE(!list_empty(&sbi->s_discard_list));
3576         }
3577
3578         if (sbi->s_group_info) {
3579                 for (i = 0; i < ngroups; i++) {
3580                         cond_resched();
3581                         grinfo = ext4_get_group_info(sb, i);
3582                         if (!grinfo)
3583                                 continue;
3584                         mb_group_bb_bitmap_free(grinfo);
3585                         ext4_lock_group(sb, i);
3586                         count = ext4_mb_cleanup_pa(grinfo);
3587                         if (count)
3588                                 mb_debug(sb, "mballoc: %d PAs left\n",
3589                                          count);
3590                         ext4_unlock_group(sb, i);
3591                         kmem_cache_free(cachep, grinfo);
3592                 }
3593                 num_meta_group_infos = (ngroups +
3594                                 EXT4_DESC_PER_BLOCK(sb) - 1) >>
3595                         EXT4_DESC_PER_BLOCK_BITS(sb);
3596                 rcu_read_lock();
3597                 group_info = rcu_dereference(sbi->s_group_info);
3598                 for (i = 0; i < num_meta_group_infos; i++)
3599                         kfree(group_info[i]);
3600                 kvfree(group_info);
3601                 rcu_read_unlock();
3602         }
3603         kfree(sbi->s_mb_avg_fragment_size);
3604         kfree(sbi->s_mb_avg_fragment_size_locks);
3605         kfree(sbi->s_mb_largest_free_orders);
3606         kfree(sbi->s_mb_largest_free_orders_locks);
3607         kfree(sbi->s_mb_offsets);
3608         kfree(sbi->s_mb_maxs);
3609         iput(sbi->s_buddy_cache);
3610         if (sbi->s_mb_stats) {
3611                 ext4_msg(sb, KERN_INFO,
3612                        "mballoc: %u blocks %u reqs (%u success)",
3613                                 atomic_read(&sbi->s_bal_allocated),
3614                                 atomic_read(&sbi->s_bal_reqs),
3615                                 atomic_read(&sbi->s_bal_success));
3616                 ext4_msg(sb, KERN_INFO,
3617                       "mballoc: %u extents scanned, %u groups scanned, %u goal hits, "
3618                                 "%u 2^N hits, %u breaks, %u lost",
3619                                 atomic_read(&sbi->s_bal_ex_scanned),
3620                                 atomic_read(&sbi->s_bal_groups_scanned),
3621                                 atomic_read(&sbi->s_bal_goals),
3622                                 atomic_read(&sbi->s_bal_2orders),
3623                                 atomic_read(&sbi->s_bal_breaks),
3624                                 atomic_read(&sbi->s_mb_lost_chunks));
3625                 ext4_msg(sb, KERN_INFO,
3626                        "mballoc: %u generated and it took %llu",
3627                                 atomic_read(&sbi->s_mb_buddies_generated),
3628                                 atomic64_read(&sbi->s_mb_generation_time));
3629                 ext4_msg(sb, KERN_INFO,
3630                        "mballoc: %u preallocated, %u discarded",
3631                                 atomic_read(&sbi->s_mb_preallocated),
3632                                 atomic_read(&sbi->s_mb_discarded));
3633         }
3634
3635         free_percpu(sbi->s_locality_groups);
3636
3637         return 0;
3638 }
3639
3640 static inline int ext4_issue_discard(struct super_block *sb,
3641                 ext4_group_t block_group, ext4_grpblk_t cluster, int count,
3642                 struct bio **biop)
3643 {
3644         ext4_fsblk_t discard_block;
3645
3646         discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
3647                          ext4_group_first_block_no(sb, block_group));
3648         count = EXT4_C2B(EXT4_SB(sb), count);
3649         trace_ext4_discard_blocks(sb,
3650                         (unsigned long long) discard_block, count);
3651         if (biop) {
3652                 return __blkdev_issue_discard(sb->s_bdev,
3653                         (sector_t)discard_block << (sb->s_blocksize_bits - 9),
3654                         (sector_t)count << (sb->s_blocksize_bits - 9),
3655                         GFP_NOFS, biop);
3656         } else
3657                 return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
3658 }
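/*
 * The sector conversion above, spelled out (illustrative): with 4k blocks,
 * s_blocksize_bits - 9 == 3, so filesystem block N covers the eight 512-byte
 * sectors starting at sector N << 3.
 */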
3659
3660 static void ext4_free_data_in_buddy(struct super_block *sb,
3661                                     struct ext4_free_data *entry)
3662 {
3663         struct ext4_buddy e4b;
3664         struct ext4_group_info *db;
3665         int err, count = 0, count2 = 0;
3666
3667         mb_debug(sb, "gonna free %u blocks in group %u (0x%p):",
3668                  entry->efd_count, entry->efd_group, entry);
3669
3670         err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
3671         /* we expect to find an existing buddy because it's pinned */
3672         BUG_ON(err != 0);
3673
3674         spin_lock(&EXT4_SB(sb)->s_md_lock);
3675         EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count;
3676         spin_unlock(&EXT4_SB(sb)->s_md_lock);
3677
3678         db = e4b.bd_info;
3679         /* there are blocks to put in buddy to make them really free */
3680         count += entry->efd_count;
3681         count2++;
3682         ext4_lock_group(sb, entry->efd_group);
3683         /* Take it out of per group rb tree */
3684         rb_erase(&entry->efd_node, &(db->bb_free_root));
3685         mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
3686
3687         /*
3688          * Clear the trimmed flag for the group so that the next
3689          * ext4_trim_fs can trim it.
3690          * If the volume is mounted with -o discard, online discard
3691          * is supported and the free blocks will be trimmed online.
3692          */
3693         if (!test_opt(sb, DISCARD))
3694                 EXT4_MB_GRP_CLEAR_TRIMMED(db);
3695
3696         if (!db->bb_free_root.rb_node) {
3697                 /* No more items in the per group rb tree;
3698                  * balance refcounts from ext4_mb_free_metadata()
3699                  */
3700                 put_page(e4b.bd_buddy_page);
3701                 put_page(e4b.bd_bitmap_page);
3702         }
3703         ext4_unlock_group(sb, entry->efd_group);
3704         ext4_mb_unload_buddy(&e4b);
3705
3706         mb_debug(sb, "freed %d blocks in %d structures\n", count,
3707                  count2);
3708 }
3709
3710 /*
3711  * This function is called by the jbd2 layer once the commit has finished,
3712  * so we know we can free the blocks that were released with that commit.
3713  */
3714 void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
3715 {
3716         struct ext4_sb_info *sbi = EXT4_SB(sb);
3717         struct ext4_free_data *entry, *tmp;
3718         struct list_head freed_data_list;
3719         struct list_head *cut_pos = NULL;
3720         bool wake;
3721
3722         INIT_LIST_HEAD(&freed_data_list);
3723
3724         spin_lock(&sbi->s_md_lock);
3725         list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) {
3726                 if (entry->efd_tid != commit_tid)
3727                         break;
3728                 cut_pos = &entry->efd_list;
3729         }
3730         if (cut_pos)
3731                 list_cut_position(&freed_data_list, &sbi->s_freed_data_list,
3732                                   cut_pos);
3733         spin_unlock(&sbi->s_md_lock);
3734
3735         list_for_each_entry(entry, &freed_data_list, efd_list)
3736                 ext4_free_data_in_buddy(sb, entry);
3737
3738         if (test_opt(sb, DISCARD)) {
3739                 spin_lock(&sbi->s_md_lock);
3740                 wake = list_empty(&sbi->s_discard_list);
3741                 list_splice_tail(&freed_data_list, &sbi->s_discard_list);
3742                 spin_unlock(&sbi->s_md_lock);
3743                 if (wake)
3744                         queue_work(system_unbound_wq, &sbi->s_discard_work);
3745         } else {
3746                 list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list)
3747                         kmem_cache_free(ext4_free_data_cachep, entry);
3748         }
3749 }
3750
3751 int __init ext4_init_mballoc(void)
3752 {
3753         ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
3754                                         SLAB_RECLAIM_ACCOUNT);
3755         if (ext4_pspace_cachep == NULL)
3756                 goto out;
3757
3758         ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
3759                                     SLAB_RECLAIM_ACCOUNT);
3760         if (ext4_ac_cachep == NULL)
3761                 goto out_pa_free;
3762
3763         ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
3764                                            SLAB_RECLAIM_ACCOUNT);
3765         if (ext4_free_data_cachep == NULL)
3766                 goto out_ac_free;
3767
3768         return 0;
3769
3770 out_ac_free:
3771         kmem_cache_destroy(ext4_ac_cachep);
3772 out_pa_free:
3773         kmem_cache_destroy(ext4_pspace_cachep);
3774 out:
3775         return -ENOMEM;
3776 }
3777
3778 void ext4_exit_mballoc(void)
3779 {
3780         /*
3781          * Wait for completion of call_rcu()'s on ext4_pspace_cachep
3782          * before destroying the slab cache.
3783          */
3784         rcu_barrier();
3785         kmem_cache_destroy(ext4_pspace_cachep);
3786         kmem_cache_destroy(ext4_ac_cachep);
3787         kmem_cache_destroy(ext4_free_data_cachep);
3788         ext4_groupinfo_destroy_slabs();
3789 }
3790
3791
3792 /*
3793  * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
3794  * Returns 0 on success or an error code otherwise
3795  */
3796 static noinline_for_stack int
3797 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
3798                                 handle_t *handle, unsigned int reserv_clstrs)
3799 {
3800         struct buffer_head *bitmap_bh = NULL;
3801         struct ext4_group_desc *gdp;
3802         struct buffer_head *gdp_bh;
3803         struct ext4_sb_info *sbi;
3804         struct super_block *sb;
3805         ext4_fsblk_t block;
3806         int err, len;
3807
3808         BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3809         BUG_ON(ac->ac_b_ex.fe_len <= 0);
3810
3811         sb = ac->ac_sb;
3812         sbi = EXT4_SB(sb);
3813
3814         bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
3815         if (IS_ERR(bitmap_bh)) {
3816                 err = PTR_ERR(bitmap_bh);
3817                 bitmap_bh = NULL;
3818                 goto out_err;
3819         }
3820
3821         BUFFER_TRACE(bitmap_bh, "getting write access");
3822         err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
3823                                             EXT4_JTR_NONE);
3824         if (err)
3825                 goto out_err;
3826
3827         err = -EIO;
3828         gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
3829         if (!gdp)
3830                 goto out_err;
3831
3832         ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
3833                         ext4_free_group_clusters(sb, gdp));
3834
3835         BUFFER_TRACE(gdp_bh, "get_write_access");
3836         err = ext4_journal_get_write_access(handle, sb, gdp_bh, EXT4_JTR_NONE);
3837         if (err)
3838                 goto out_err;
3839
3840         block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3841
3842         len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
3843         if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
3844                 ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
3845                            "fs metadata", block, block+len);
3846                 /* The file system is mounted not to panic on error;
3847                  * fix the bitmap and return EFSCORRUPTED.
3848                  * We leak some of the blocks here.
3849                  */
3850                 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3851                 mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3852                               ac->ac_b_ex.fe_len);
3853                 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3854                 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3855                 if (!err)
3856                         err = -EFSCORRUPTED;
3857                 goto out_err;
3858         }
3859
3860         ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3861 #ifdef AGGRESSIVE_CHECK
3862         {
3863                 int i;
3864                 for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
3865                         BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
3866                                                 bitmap_bh->b_data));
3867                 }
3868         }
3869 #endif
3870         mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3871                       ac->ac_b_ex.fe_len);
3872         if (ext4_has_group_desc_csum(sb) &&
3873             (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3874                 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3875                 ext4_free_group_clusters_set(sb, gdp,
3876                                              ext4_free_clusters_after_init(sb,
3877                                                 ac->ac_b_ex.fe_group, gdp));
3878         }
3879         len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
3880         ext4_free_group_clusters_set(sb, gdp, len);
3881         ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh);
3882         ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
3883
3884         ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3885         percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
3886         /*
3887          * Now reduce the dirty block count also. Should not go negative
3888          */
3889         if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
3890                 /* release all the reserved blocks if non delalloc */
3891                 percpu_counter_sub(&sbi->s_dirtyclusters_counter,
3892                                    reserv_clstrs);
3893
3894         if (sbi->s_log_groups_per_flex) {
3895                 ext4_group_t flex_group = ext4_flex_group(sbi,
3896                                                           ac->ac_b_ex.fe_group);
3897                 atomic64_sub(ac->ac_b_ex.fe_len,
3898                              &sbi_array_rcu_deref(sbi, s_flex_groups,
3899                                                   flex_group)->free_clusters);
3900         }
3901
3902         err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3903         if (err)
3904                 goto out_err;
3905         err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
3906
3907 out_err:
3908         brelse(bitmap_bh);
3909         return err;
3910 }
3911
3912 /*
3913  * Idempotent helper for Ext4 fast commit replay path to set the state of
3914  * blocks in bitmaps and update counters.
3915  */
3916 void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
3917                         int len, int state)
3918 {
3919         struct buffer_head *bitmap_bh = NULL;
3920         struct ext4_group_desc *gdp;
3921         struct buffer_head *gdp_bh;
3922         struct ext4_sb_info *sbi = EXT4_SB(sb);
3923         ext4_group_t group;
3924         ext4_grpblk_t blkoff;
3925         int i, err = 0; /* err is checked after the loop even on early exit */
3926         int already;
3927         unsigned int clen, clen_changed, thisgrp_len;
3928
3929         while (len > 0) {
3930                 ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
3931
3932                 /*
3933                  * Check to see if we are freeing blocks across a group
3934                  * boundary.
3935                  * In case of flex_bg, it can happen that (block, len)
3936                  * spans more than one group. In that case we need to
3937                  * get the corresponding group metadata to work with,
3938                  * which is why we loop over one group at a time.
3939                  */
3940                 thisgrp_len = min_t(unsigned int, (unsigned int)len,
3941                         EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
3942                 clen = EXT4_NUM_B2C(sbi, thisgrp_len);
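                /*
                 * Illustrative split, assuming 4k blocks, no bigalloc and
                 * 32768 blocks per group: for blkoff == 32000 and len == 2000,
                 * thisgrp_len = min(2000, 32768 - 32000) = 768 blocks are
                 * handled now; the remaining 1232 fall into the next group on
                 * the following loop iteration.
                 */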
3943
3944                 if (!ext4_sb_block_valid(sb, NULL, block, thisgrp_len)) {
3945                         ext4_error(sb, "Marking blocks in system zone - "
3946                                    "Block = %llu, len = %u",
3947                                    block, thisgrp_len);
3948                         bitmap_bh = NULL;
3949                         break;
3950                 }
3951
3952                 bitmap_bh = ext4_read_block_bitmap(sb, group);
3953                 if (IS_ERR(bitmap_bh)) {
3954                         err = PTR_ERR(bitmap_bh);
3955                         bitmap_bh = NULL;
3956                         break;
3957                 }
3958
3959                 err = -EIO;
3960                 gdp = ext4_get_group_desc(sb, group, &gdp_bh);
3961                 if (!gdp)
3962                         break;
3963
3964                 ext4_lock_group(sb, group);
3965                 already = 0;
3966                 for (i = 0; i < clen; i++)
3967                         if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
3968                                          !state)
3969                                 already++;
3970
3971                 clen_changed = clen - already;
3972                 if (state)
3973                         mb_set_bits(bitmap_bh->b_data, blkoff, clen);
3974                 else
3975                         mb_clear_bits(bitmap_bh->b_data, blkoff, clen);
3976                 if (ext4_has_group_desc_csum(sb) &&
3977                     (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3978                         gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3979                         ext4_free_group_clusters_set(sb, gdp,
3980                              ext4_free_clusters_after_init(sb, group, gdp));
3981                 }
3982                 if (state)
3983                         clen = ext4_free_group_clusters(sb, gdp) - clen_changed;
3984                 else
3985                         clen = ext4_free_group_clusters(sb, gdp) + clen_changed;
3986
3987                 ext4_free_group_clusters_set(sb, gdp, clen);
3988                 ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
3989                 ext4_group_desc_csum_set(sb, group, gdp);
3990
3991                 ext4_unlock_group(sb, group);
3992
3993                 if (sbi->s_log_groups_per_flex) {
3994                         ext4_group_t flex_group = ext4_flex_group(sbi, group);
3995                         struct flex_groups *fg = sbi_array_rcu_deref(sbi,
3996                                                    s_flex_groups, flex_group);
3997
3998                         if (state)
3999                                 atomic64_sub(clen_changed, &fg->free_clusters);
4000                         else
4001                                 atomic64_add(clen_changed, &fg->free_clusters);
4002
4003                 }
4004
4005                 err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
4006                 if (err)
4007                         break;
4008                 sync_dirty_buffer(bitmap_bh);
4009                 err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
4010                 sync_dirty_buffer(gdp_bh);
4011                 if (err)
4012                         break;
4013
4014                 block += thisgrp_len;
4015                 len -= thisgrp_len;
4016                 brelse(bitmap_bh);
4017                 BUG_ON(len < 0);
4018         }
4019
4020         if (err)
4021                 brelse(bitmap_bh);
4022 }
4023
4024 /*
4025  * Here we normalize a request for a locality group.
4026  * Group requests are normalized to s_mb_group_prealloc, which is
4027  * rounded up to a multiple of s_stripe when a stripe size is set.
4028  * s_mb_group_prealloc can be configured via
4029  * /sys/fs/ext4/<partition>/mb_group_prealloc
4030  *
4031  * XXX: should we try to preallocate more than the group has now?
4032  */
4033 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
4034 {
4035         struct super_block *sb = ac->ac_sb;
4036         struct ext4_locality_group *lg = ac->ac_lg;
4037
4038         BUG_ON(lg == NULL);
4039         ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
4040         mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);
4041 }
4042
4043 /*
4044  * Normalization means making the request better in terms of
4045  * size and alignment
4046  */
4047 static noinline_for_stack void
4048 ext4_mb_normalize_request(struct ext4_allocation_context *ac,
4049                                 struct ext4_allocation_request *ar)
4050 {
4051         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4052         struct ext4_super_block *es = sbi->s_es;
4053         int bsbits, max;
4054         loff_t size, start_off, end;
4055         loff_t orig_size __maybe_unused;
4056         ext4_lblk_t start;
4057         struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4058         struct ext4_prealloc_space *pa;
4059
4060         /* only normalize data requests; metadata requests
4061            do not need preallocation */
4062         if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4063                 return;
4064
4065         /* sometimes the caller may want exact blocks */
4066         if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4067                 return;
4068
4069         /* caller may indicate that preallocation isn't
4070          * required (it's a tail, for example) */
4071         if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
4072                 return;
4073
4074         if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
4075                 ext4_mb_normalize_group_request(ac);
4076                 return;
4077         }
4078
4079         bsbits = ac->ac_sb->s_blocksize_bits;
4080
4081         /* first, let's learn the actual file size
4082          * assuming the current request is allocated */
4083         size = extent_logical_end(sbi, &ac->ac_o_ex);
4084         size = size << bsbits;
4085         if (size < i_size_read(ac->ac_inode))
4086                 size = i_size_read(ac->ac_inode);
4087         orig_size = size;
4088
4089         /* max size of free chunks */
4090         max = 2 << bsbits;
4091
4092 #define NRL_CHECK_SIZE(req, size, max, chunk_size)      \
4093                 (req <= (size) || max <= (chunk_size))
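        /*
         * An illustrative reading of the macro above (hypothetical
         * numbers): the check passes when either the request already fits
         * the candidate bucket (req <= size) or the largest free chunk is
         * no bigger than chunk_size, so a bigger aligned bucket could not
         * be satisfied anyway. E.g. with 1KiB blocks (bsbits == 10),
         * max = 2 << 10 = 2048, hence max <= 2 * 1024 holds and the 2MiB
         * bucket is selected even for larger requests.
         */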
4094
4095         /* first, try to predict filesize */
4096         /* XXX: should this table be tunable? */
4097         start_off = 0;
4098         if (size <= 16 * 1024) {
4099                 size = 16 * 1024;
4100         } else if (size <= 32 * 1024) {
4101                 size = 32 * 1024;
4102         } else if (size <= 64 * 1024) {
4103                 size = 64 * 1024;
4104         } else if (size <= 128 * 1024) {
4105                 size = 128 * 1024;
4106         } else if (size <= 256 * 1024) {
4107                 size = 256 * 1024;
4108         } else if (size <= 512 * 1024) {
4109                 size = 512 * 1024;
4110         } else if (size <= 1024 * 1024) {
4111                 size = 1024 * 1024;
4112         } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
4113                 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4114                                                 (21 - bsbits)) << 21;
4115                 size = 2 * 1024 * 1024;
4116         } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
4117                 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4118                                                         (22 - bsbits)) << 22;
4119                 size = 4 * 1024 * 1024;
4120         } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
4121                                         (8<<20)>>bsbits, max, 8 * 1024)) {
4122                 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4123                                                         (23 - bsbits)) << 23;
4124                 size = 8 * 1024 * 1024;
4125         } else {
4126                 start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
4127                 size      = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb),
4128                                               ac->ac_o_ex.fe_len) << bsbits;
4129         }
4130         size = size >> bsbits;
4131         start = start_off >> bsbits;
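        /*
         * Worked example (hypothetical numbers, 4KiB blocks => bsbits ==
         * 12): a request whose logical extent ends at block 26 on a 90KiB
         * file gives size = max(26 << 12, 92160) = 106496 bytes. That
         * falls into the 128KiB bucket above, so size becomes
         * (128 * 1024) >> 12 = 32 blocks and start stays at
         * start_off >> 12 = 0.
         */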
4132
4133         /*
4134          * For tiny groups (smaller than 8MB) the chosen allocation
4135          * alignment may be larger than group size. Make sure the
4136          * alignment does not move allocation to a different group which
4137          * makes mballoc fail assertions later.
4138          */
4139         start = max(start, rounddown(ac->ac_o_ex.fe_logical,
4140                         (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));
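        /*
         * Illustrative case (hypothetical numbers): with 1024 blocks per
         * group and fe_logical == 1030, rounddown(1030, 1024) == 1024, so
         * a start that alignment pulled back to 0 is clamped to 1024 and
         * the goal range stays in the same group as the original request.
         */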
4141
4142         /* avoid unnecessary preallocation that may trigger assertions */
4143         if (start + size > EXT_MAX_BLOCKS)
4144                 size = EXT_MAX_BLOCKS - start;
4145
4146         /* don't cover already allocated blocks in selected range */
4147         if (ar->pleft && start <= ar->lleft) {
4148                 size -= ar->lleft + 1 - start;
4149                 start = ar->lleft + 1;
4150         }
4151         if (ar->pright && start + size - 1 >= ar->lright)
4152                 size -= start + size - ar->lright;
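        /*
         * Illustrative example (hypothetical numbers): start = 100,
         * size = 64 and ar->lleft = 109 with pleft set shrinks size by
         * 109 + 1 - 100 = 10 and moves start to 110, so the normalized
         * range no longer covers the already-mapped block at 109.
         */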
4153
4154         /*
4155          * Trim allocation request for filesystems with artificially small
4156          * groups.
4157          */
4158         if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
4159                 size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
4160
4161         end = start + size;
4162
4163         /* check we don't cross already preallocated blocks */
4164         rcu_read_lock();
4165         list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
4166                 loff_t pa_end;
4167
4168                 if (pa->pa_deleted)
4169                         continue;
4170                 spin_lock(&pa->pa_lock);
4171                 if (pa->pa_deleted) {
4172                         spin_unlock(&pa->pa_lock);
4173                         continue;
4174                 }
4175
4176                 pa_end = pa_logical_end(EXT4_SB(ac->ac_sb), pa);
4177
4178                 /* PA must not overlap original request */
4179                 BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
4180                         ac->ac_o_ex.fe_logical < pa->pa_lstart));
4181
4182                 /* skip PAs this normalized request doesn't overlap with */
4183                 if (pa->pa_lstart >= end || pa_end <= start) {
4184                         spin_unlock(&pa->pa_lock);
4185                         continue;
4186                 }
4187                 BUG_ON(pa->pa_lstart <= start && pa_end >= end);
4188
4189                 /* adjust start or end to be adjacent to this pa */
4190                 if (pa_end <= ac->ac_o_ex.fe_logical) {
4191                         BUG_ON(pa_end < start);
4192                         start = pa_end;
4193                 } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
4194                         BUG_ON(pa->pa_lstart > end);
4195                         end = pa->pa_lstart;
4196                 }
4197                 spin_unlock(&pa->pa_lock);
4198         }
4199         rcu_read_unlock();
4200         size = end - start;
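        /*
         * Illustrative example of the loop above (hypothetical numbers):
         * with a normalized range [0, 64) and an existing pa covering
         * logical blocks [0, 16) that ends at or before the original
         * request, start is pulled up to pa_end == 16 and size shrinks to
         * 48, keeping the goal range disjoint from that preallocation.
         */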
4201
4202         /* XXX: extra loop to check we really don't overlap preallocations */
4203         rcu_read_lock();
4204         list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
4205                 loff_t pa_end;
4206
4207                 spin_lock(&pa->pa_lock);
4208                 if (pa->pa_deleted == 0) {
4209                         pa_end = pa_logical_end(EXT4_SB(ac->ac_sb), pa);
4210                         BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
4211                 }
4212                 spin_unlock(&pa->pa_lock);
4213         }
4214         rcu_read_unlock();
4215
4216         /*
4217          * In this function "start" and "size" are normalized for better
4218          * alignment and length such that we could preallocate more blocks.
4219          * This normalization is done such that original request of
4220          * ac->ac_o_ex.fe_logical & fe_len should always lie within "start" and
4221          * "size" boundaries.
4222  * (Note fe_len can be relaxed since the FS block allocation API does not
4223  * guarantee the number of contiguous blocks allocated, since that
4224  * depends upon the free space left, etc).
4225          * In case of inode pa, later we use the allocated blocks
4226          * [pa_start + fe_logical - pa_lstart, fe_len/size] from the preallocated
4227          * range of goal/best blocks [start, size] to put it at the
4228          * ac_o_ex.fe_logical extent of this inode.
4229          * (See ext4_mb_use_inode_pa() for more details)
4230          */
4231         if (start + size <= ac->ac_o_ex.fe_logical ||
4232                         start > ac->ac_o_ex.fe_logical) {
4233                 ext4_msg(ac->ac_sb, KERN_ERR,
4234                          "start %lu, size %lu, fe_logical %lu",
4235                          (unsigned long) start, (unsigned long) size,
4236                          (unsigned long) ac->ac_o_ex.fe_logical);
4237                 BUG();
4238         }
4239         BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
4240
4241         /* now prepare goal request */
4242
4243         /* XXX: is it better to align blocks WRT to logical
4244          * placement or satisfy big request as is */
4245         ac->ac_g_ex.fe_logical = start;
4246         ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
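        /*
         * Illustrative note (hypothetical bigalloc setup): with a cluster
         * ratio of 16, EXT4_NUM_B2C() rounds the goal length up to whole
         * clusters, e.g. a 40-block goal becomes (40 + 15) / 16 == 3
         * clusters in fe_len.
         */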
4247
4248         /* define goal start in order to merge */
4249         if (ar->pright && (ar->lright == (start + size)) &&
4250             ar->pright >= size &&
4251             ar->pright - size >= le32_to_cpu(es->s_first_data_block)) {
4252                 /* merge to the right */
4253                 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
4254                                                 &ac->ac_g_ex.fe_group,
4255                                                 &ac->ac_g_ex.fe_start);
4256                 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4257         }
4258         if (ar->pleft && (ar->lleft + 1 == start) &&
4259             ar->pleft + 1 < ext4_blocks_count(es)) {
4260                 /* merge to the left */
4261                 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
4262                                                 &ac->ac_g_ex.fe_group,
4263                                                 &ac->ac_g_ex.fe_start);
4264                 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4265         }
4266
4267         mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size,
4268                  orig_size, start);
4269 }
4270
4271 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
4272 {
4273         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4274
4275         if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) {
4276                 atomic_inc(&sbi->s_bal_reqs);
4277                 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
4278                 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
4279                         atomic_inc(&sbi->s_bal_success);
4280                 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
4281                 atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned);
4282                 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
4283                                 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
4284                         atomic_inc(&sbi->s_bal_goals);
4285                 if (ac->ac_found > sbi->s_mb_max_to_scan)
4286                         atomic_inc(&sbi->s_bal_breaks);
4287         }
4288
4289         if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
4290                 trace_ext4_mballoc_alloc(ac);
4291         else
4292                 trace_ext4_mballoc_prealloc(ac);
4293 }
4294
4295 /*
4296  * Called on failure; free up any blocks from the inode PA for this
4297  * context.  We don't need this for MB_GROUP_PA because we only change
4298  * pa_free in ext4_mb_release_context(), but on failure, we've already
4299  * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
4300  */
4301 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
4302 {
4303         struct ext4_prealloc_space *pa = ac->ac_pa;
4304         struct ext4_buddy e4b;
4305         int err;
4306
4307         if (pa == NULL) {
4308                 if (ac->ac_f_ex.fe_len == 0)
4309                         return;
4310                 err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
4311                 if (err) {
4312                         /*
4313                          * This should never happen since we pin the
4314                          * pages in the ext4_allocation_context so
4315                          * ext4_mb_load_buddy() should never fail.
4316                          */
4317                         WARN(1, "mb_load_buddy failed (%d)", err);
4318                         return;
4319                 }
4320                 ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4321                 mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
4322                                ac->ac_f_ex.fe_len);
4323                 ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4324                 ext4_mb_unload_buddy(&e4b);
4325                 return;
4326         }
4327         if (pa->pa_type == MB_INODE_PA)
4328                 pa->pa_free += ac->ac_b_ex.fe_len;
4329 }
4330
4331 /*
4332  * use blocks preallocated to inode
4333  */
4334 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
4335                                 struct ext4_prealloc_space *pa)
4336 {
4337         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4338         ext4_fsblk_t start;
4339         ext4_fsblk_t end;
4340         int len;
4341
4342         /* found preallocated blocks, use them */
4343         start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
4344         end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
4345                   start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
4346         len = EXT4_NUM_B2C(sbi, end - start);
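        /*
         * Illustrative example (hypothetical numbers, 1:1 block/cluster):
         * pa_lstart = 100, pa_pstart = 5000, pa_len = 50 and a request at
         * fe_logical = 120 for 40 blocks gives start = 5020 and
         * end = min(5050, 5060) = 5050, so only len = 30 blocks of the
         * request are served from this pa.
         */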
4347         ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
4348                                         &ac->ac_b_ex.fe_start);
4349         ac->ac_b_ex.fe_len = len;
4350         ac->ac_status = AC_STATUS_FOUND;
4351         ac->ac_pa = pa;
4352
4353         BUG_ON(start < pa->pa_pstart);
4354         BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
4355         BUG_ON(pa->pa_free < len);
4356         BUG_ON(ac->ac_b_ex.fe_len <= 0);
4357         pa->pa_free -= len;
4358
4359         mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa);
4360 }
4361
4362 /*
4363  * use blocks preallocated to locality group
4364  */
4365 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
4366                                 struct ext4_prealloc_space *pa)
4367 {
4368         unsigned int len = ac->ac_o_ex.fe_len;
4369
4370         ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
4371                                         &ac->ac_b_ex.fe_group,
4372                                         &ac->ac_b_ex.fe_start);
4373         ac->ac_b_ex.fe_len = len;
4374         ac->ac_status = AC_STATUS_FOUND;
4375         ac->ac_pa = pa;
4376
4377         /* we don't correct pa_pstart or pa_len here to avoid a
4378          * possible race when the group is being loaded concurrently;
4379          * instead we correct the pa later, after blocks are marked
4380          * in the on-disk bitmap -- see ext4_mb_release_context().
4381          * Other CPUs are prevented from allocating from this pa by lg_mutex.
4382          */
4383         mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n",
4384                  pa->pa_lstart-len, len, pa);
4385 }
4386
4387 /*
4388  * Return the prealloc space that has the minimal distance
4389  * from the goal block. @cpa is the prealloc
4390  * space with the currently known minimal distance
4391  * from the goal block.
4392  */
4393 static struct ext4_prealloc_space *
4394 ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
4395                         struct ext4_prealloc_space *pa,
4396                         struct ext4_prealloc_space *cpa)
4397 {
4398         ext4_fsblk_t cur_distance, new_distance;
4399
4400         if (cpa == NULL) {
4401                 atomic_inc(&pa->pa_count);
4402                 return pa;
4403         }
4404         cur_distance = abs(goal_block - cpa->pa_pstart);
4405         new_distance = abs(goal_block - pa->pa_pstart);
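        /*
         * Illustrative example (hypothetical numbers): goal_block = 1000,
         * cpa->pa_pstart = 900 and pa->pa_pstart = 1040 give
         * cur_distance = 100 and new_distance = 40, so the reference below
         * moves from cpa to the closer pa.
         */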
4406
4407         if (cur_distance <= new_distance)
4408                 return cpa;
4409
4410         /* drop the previous reference */
4411         atomic_dec(&cpa->pa_count);
4412         atomic_inc(&pa->pa_count);
4413         return pa;
4414 }
4415
4416 /*
4417  * search goal blocks in preallocated space
4418  */
4419 static noinline_for_stack bool
4420 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
4421 {
4422         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4423         int order, i;
4424         struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4425         struct ext4_locality_group *lg;
4426         struct ext4_prealloc_space *pa, *cpa = NULL;
4427         ext4_fsblk_t goal_block;
4428
4429         /* only data can be preallocated */
4430         if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4431                 return false;
4432
4433         /* first, try per-file preallocation */
4434         rcu_read_lock();
4435         list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
4436
4437                 /* none of the fields in this condition change,
4438                  * so we can skip locking for them */
4439                 if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
4440                     ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, pa))
4441                         continue;
4442
4443                 /* non-extent files can't have physical blocks past 2^32 */
4444                 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
4445                     (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
4446                      EXT4_MAX_BLOCK_FILE_PHYS))
4447                         continue;
4448
4449                 /* found preallocated blocks, use them */
4450                 spin_lock(&pa->pa_lock);
4451                 if (pa->pa_deleted == 0 && pa->pa_free) {
4452                         atomic_inc(&pa->pa_count);
4453                         ext4_mb_use_inode_pa(ac, pa);
4454                         spin_unlock(&pa->pa_lock);
4455                         ac->ac_criteria = 10;
4456                         rcu_read_unlock();
4457                         return true;
4458                 }
4459                 spin_unlock(&pa->pa_lock);
4460         }
4461         rcu_read_unlock();
4462
4463         /* can we use group allocation? */
4464         if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
4465                 return false;
4466
4467         /* inode may have no locality group for some reason */
4468         lg = ac->ac_lg;
4469         if (lg == NULL)
4470                 return false;
4471         order  = fls(ac->ac_o_ex.fe_len) - 1;
4472         if (order > PREALLOC_TB_SIZE - 1)
4473                 /* The max size of hash table is PREALLOC_TB_SIZE */
4474                 order = PREALLOC_TB_SIZE - 1;
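        /*
         * Illustrative example: fls() returns the position of the highest
         * set bit, so a request of fe_len == 24 gives
         * order = fls(24) - 1 = 4, i.e. the scan below starts at the
         * 16-block bucket of the per-lg preallocation table.
         */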
4475
4476         goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
4477         /*
4478          * search for the prealloc space that has the
4479          * minimal distance from the goal block.
4480          */
4481         for (i = order; i < PREALLOC_TB_SIZE; i++) {
4482                 rcu_read_lock();
4483                 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
4484                                         pa_inode_list) {
4485                         spin_lock(&pa->pa_lock);
4486                         if (pa->pa_deleted == 0 &&
4487                                         pa->pa_free >= ac->ac_o_ex.fe_len) {
4488
4489                                 cpa = ext4_mb_check_group_pa(goal_block,
4490                                                                 pa, cpa);
4491                         }
4492                         spin_unlock(&pa->pa_lock);
4493                 }
4494                 rcu_read_unlock();
4495         }
4496         if (cpa) {
4497                 ext4_mb_use_group_pa(ac, cpa);
4498                 ac->ac_criteria = 20;
4499                 return true;
4500         }
4501         return false;
4502 }
4503
4504 /*
4505  * The function goes through all blocks freed in the group
4506  * but not yet committed and marks them used in the in-core bitmap.
4507  * The buddy must be generated from this bitmap.
4508  * Needs to be called with the ext4 group lock held.
4509  */
4510 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
4511                                                 ext4_group_t group)
4512 {
4513         struct rb_node *n;
4514         struct ext4_group_info *grp;
4515         struct ext4_free_data *entry;
4516
4517         grp = ext4_get_group_info(sb, group);
4518         if (!grp)
4519                 return;
4520         n = rb_first(&(grp->bb_free_root));
4521
4522         while (n) {
4523                 entry = rb_entry(n, struct ext4_free_data, efd_node);
4524                 mb_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
4525                 n = rb_next(n);
4526         }
4528 }
4529
4530 /*
4531  * The function goes through all preallocations in this group and marks them
4532  * used in the in-core bitmap. The buddy must be generated from this bitmap.
4533  * Needs to be called with the ext4 group lock held.
4534  */
4535 static noinline_for_stack
4536 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
4537                                         ext4_group_t group)
4538 {
4539         struct ext4_group_info *grp = ext4_get_group_info(sb, group);
4540         struct ext4_prealloc_space *pa;
4541         struct list_head *cur;
4542         ext4_group_t groupnr;
4543         ext4_grpblk_t start;
4544         int preallocated = 0;
4545         int len;
4546
4547         if (!grp)
4548                 return;
4549
4550         /* All preallocation discard paths first load the group,
4551          * so the only competing code is preallocation use.
4552          * We don't need any locking here.
4553          * Notice that we do NOT ignore preallocations with pa_deleted set;
4554          * otherwise we could leave used blocks available for
4555          * allocation in the buddy when a concurrent ext4_mb_put_pa()
4556          * is dropping the preallocation
4557          */
4558         list_for_each(cur, &grp->bb_prealloc_list) {
4559                 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
4560                 spin_lock(&pa->pa_lock);
4561                 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
4562                                              &groupnr, &start);
4563                 len = pa->pa_len;
4564                 spin_unlock(&pa->pa_lock);
4565                 if (unlikely(len == 0))
4566                         continue;
4567                 BUG_ON(groupnr != group);
4568                 mb_set_bits(bitmap, start, len);
4569                 preallocated += len;
4570         }
4571         mb_debug(sb, "preallocated %d for group %u\n", preallocated, group);
4572 }
4573
4574 static void ext4_mb_mark_pa_deleted(struct super_block *sb,
4575                                     struct ext4_prealloc_space *pa)
4576 {
4577         struct ext4_inode_info *ei;
4578
4579         if (pa->pa_deleted) {
4580                 ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n",
4581                              pa->pa_type, pa->pa_pstart, pa->pa_lstart,
4582                              pa->pa_len);
4583                 return;
4584         }
4585
4586         pa->pa_deleted = 1;
4587
4588         if (pa->pa_type == MB_INODE_PA) {
4589                 ei = EXT4_I(pa->pa_inode);
4590                 atomic_dec(&ei->i_prealloc_active);
4591         }
4592 }
4593
4594 static void ext4_mb_pa_callback(struct rcu_head *head)
4595 {
4596         struct ext4_prealloc_space *pa;
4597         pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
4598
4599         BUG_ON(atomic_read(&pa->pa_count));
4600         BUG_ON(pa->pa_deleted == 0);
4601         kmem_cache_free(ext4_pspace_cachep, pa);
4602 }
4603
4604 /*
4605  * drops a reference to preallocated space descriptor
4606  * if this was the last reference and the space is consumed
4607  */
4608 static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
4609                         struct super_block *sb, struct ext4_prealloc_space *pa)
4610 {
4611         ext4_group_t grp;
4612         ext4_fsblk_t grp_blk;
4613
4614         /* in this short window concurrent discard can set pa_deleted */
4615         spin_lock(&pa->pa_lock);
4616         if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
4617                 spin_unlock(&pa->pa_lock);
4618                 return;
4619         }
4620
4621         if (pa->pa_deleted == 1) {
4622                 spin_unlock(&pa->pa_lock);
4623                 return;
4624         }
4625
4626         ext4_mb_mark_pa_deleted(sb, pa);
4627         spin_unlock(&pa->pa_lock);
4628
4629         grp_blk = pa->pa_pstart;
4630         /*
4631          * If doing group-based preallocation, pa_pstart may be in the
4632          * next group when pa is used up
4633          */
4634         if (pa->pa_type == MB_GROUP_PA)
4635                 grp_blk--;
4636
4637         grp = ext4_get_group_number(sb, grp_blk);
4638
4639         /*
4640          * possible race:
4641          *
4642          *  P1 (buddy init)                     P2 (regular allocation)
4643          *                                      find block B in PA
4644          *  copy on-disk bitmap to buddy
4645          *                                      mark B in on-disk bitmap
4646          *                                      drop PA from group
4647          *  mark all PAs in buddy
4648          *
4649          * thus, P1 initializes buddy with B available. to prevent this
4650          * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
4651          * against that pair
4652          */
4653         ext4_lock_group(sb, grp);
4654         list_del(&pa->pa_group_list);
4655         ext4_unlock_group(sb, grp);
4656
4657         spin_lock(pa->pa_obj_lock);
4658         list_del_rcu(&pa->pa_inode_list);
4659         spin_unlock(pa->pa_obj_lock);
4660
4661         call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4662 }
4663
4664 /*
4665  * creates new preallocated space for given inode
4666  */
4667 static noinline_for_stack void
4668 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
4669 {
4670         struct super_block *sb = ac->ac_sb;
4671         struct ext4_sb_info *sbi = EXT4_SB(sb);
4672         struct ext4_prealloc_space *pa;
4673         struct ext4_group_info *grp;
4674         struct ext4_inode_info *ei;
4675
4676         /* preallocate only when the found space is larger than requested */
4677         BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
4678         BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4679         BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
4680         BUG_ON(ac->ac_pa == NULL);
4681
4682         pa = ac->ac_pa;
4683
4684         if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
4685                 struct ext4_free_extent ex = {
4686                         .fe_logical = ac->ac_g_ex.fe_logical,
4687                         .fe_len = ac->ac_g_ex.fe_len,
4688                 };
4689                 loff_t orig_goal_end = extent_logical_end(sbi, &ex);
4690                 loff_t o_ex_end = extent_logical_end(sbi, &ac->ac_o_ex);
4691
4692                 /*
4693                  * We can't allocate as much as normalizer wants, so we try
4694                  * to get proper lstart to cover the original request, except
4695                  * when the goal doesn't cover the original request as below:
4696                  *
4697                  * orig_ex:2045/2055(10), isize:8417280 -> normalized:0/2048
4698                  * best_ex:0/200(200) -> adjusted: 1848/2048(200)
4699                  */
4700                 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
4701                 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
4702
4703                 /*
4704                  * Use the below logic for adjusting best extent as it keeps
4705                  * fragmentation in check while ensuring logical range of best
4706                  * extent doesn't overflow out of goal extent:
4707                  *
4708                  * 1. Check if best ex can be kept at end of goal and still
4709                  *    cover original start
4710                  * 2. Else, check if best ex can be kept at start of goal and
4711                  *    still cover original end
4712                  * 3. Else, keep the best ex at start of original request.
4713                  */
4714                 ex.fe_len = ac->ac_b_ex.fe_len;
4715
4716                 ex.fe_logical = orig_goal_end - EXT4_C2B(sbi, ex.fe_len);
4717                 if (ac->ac_o_ex.fe_logical >= ex.fe_logical)
4718                         goto adjust_bex;
4719
4720                 ex.fe_logical = ac->ac_g_ex.fe_logical;
4721                 if (o_ex_end <= extent_logical_end(sbi, &ex))
4722                         goto adjust_bex;
4723
4724                 ex.fe_logical = ac->ac_o_ex.fe_logical;
4725 adjust_bex:
4726                 ac->ac_b_ex.fe_logical = ex.fe_logical;
4727
4728                 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
4729                 BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end);
4730         }
4731
4732         /* preallocation can change ac_b_ex, thus we store actually
4733          * allocated blocks for history */
4734         ac->ac_f_ex = ac->ac_b_ex;
4735
4736         pa->pa_lstart = ac->ac_b_ex.fe_logical;
4737         pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4738         pa->pa_len = ac->ac_b_ex.fe_len;
4739         pa->pa_free = pa->pa_len;
4740         spin_lock_init(&pa->pa_lock);
4741         INIT_LIST_HEAD(&pa->pa_inode_list);
4742         INIT_LIST_HEAD(&pa->pa_group_list);
4743         pa->pa_deleted = 0;
4744         pa->pa_type = MB_INODE_PA;
4745
4746         mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
4747                  pa->pa_len, pa->pa_lstart);
4748         trace_ext4_mb_new_inode_pa(ac, pa);
4749
4750         ext4_mb_use_inode_pa(ac, pa);
4751         atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
4752
4753         ei = EXT4_I(ac->ac_inode);
4754         grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
4755         if (!grp)
4756                 return;
4757
4758         pa->pa_obj_lock = &ei->i_prealloc_lock;
4759         pa->pa_inode = ac->ac_inode;
4760
4761         list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
4762
4763         spin_lock(pa->pa_obj_lock);
4764         list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
4765         spin_unlock(pa->pa_obj_lock);
4766         atomic_inc(&ei->i_prealloc_active);
4767 }
4768
4769 /*
4770  * creates new preallocated space for the locality group this inode belongs to
4771  */
4772 static noinline_for_stack void
4773 ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
4774 {
4775         struct super_block *sb = ac->ac_sb;
4776         struct ext4_locality_group *lg;
4777         struct ext4_prealloc_space *pa;
4778         struct ext4_group_info *grp;
4779
4780         /* preallocate only when the found space is larger than requested */
4781         BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
4782         BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4783         BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
4784         BUG_ON(ac->ac_pa == NULL);
4785
4786         pa = ac->ac_pa;
4787
4788         /* preallocation can change ac_b_ex, thus we store actually
4789          * allocated blocks for history */
4790         ac->ac_f_ex = ac->ac_b_ex;
4791
4792         pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4793         pa->pa_lstart = pa->pa_pstart;
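        /*
         * Descriptive note: for a group pa the logical start is not used
         * to satisfy allocations (ext4_mb_use_group_pa() consumes only
         * pa_pstart), so pa_lstart is simply initialized to the physical
         * start to keep the field well-defined.
         */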
4794         pa->pa_len = ac->ac_b_ex.fe_len;
4795         pa->pa_free = pa->pa_len;
4796         spin_lock_init(&pa->pa_lock);
4797         INIT_LIST_HEAD(&pa->pa_inode_list);
4798         INIT_LIST_HEAD(&pa->pa_group_list);
4799         pa->pa_deleted = 0;
4800         pa->pa_type = MB_GROUP_PA;
4801
4802         mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
4803                  pa->pa_len, pa->pa_lstart);
4804         trace_ext4_mb_new_group_pa(ac, pa);
4805
4806         ext4_mb_use_group_pa(ac, pa);
4807         atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
4808
4809         grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
4810         if (!grp)
4811                 return;
4812         lg = ac->ac_lg;
4813         BUG_ON(lg == NULL);
4814
4815         pa->pa_obj_lock = &lg->lg_prealloc_lock;
4816         pa->pa_inode = NULL;
4817
4818         list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
4819
4820         /*
4821          * We will later add the new pa to the right bucket
4822          * after updating the pa_free in ext4_mb_release_context
4823          */
4824 }
4825
4826 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
4827 {
4828         if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4829                 ext4_mb_new_group_pa(ac);
4830         else
4831                 ext4_mb_new_inode_pa(ac);
4832 }
4833
4834 /*
4835  * finds all unused blocks in on-disk bitmap, frees them in
4836  * in-core bitmap and buddy.
4837  * @pa must be unlinked from inode and group lists, so that
4838  * nobody else can find/use it.
4839  * the caller MUST hold group/inode locks.
4840  * TODO: optimize the case when there are no in-core structures yet
4841  */
4842 static noinline_for_stack int
4843 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
4844                         struct ext4_prealloc_space *pa)
4845 {
4846         struct super_block *sb = e4b->bd_sb;
4847         struct ext4_sb_info *sbi = EXT4_SB(sb);
4848         unsigned int end;
4849         unsigned int next;
4850         ext4_group_t group;
4851         ext4_grpblk_t bit;
4852         unsigned long long grp_blk_start;
4853         int free = 0;
4854
4855         BUG_ON(pa->pa_deleted == 0);
4856         ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
4857         grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
4858         BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
4859         end = bit + pa->pa_len;
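        /*
         * Illustrative walk of the loop below (hypothetical bitmap): with
         * bit = 0, end = 8 and in-use bits 0,1,4,5,6, the first zero run
         * is bits 2-3 (freed as 2 blocks) and the second is bit 7 (freed
         * as 1 block), so free accumulates to 3.
         */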
4860
4861         while (bit < end) {
4862                 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
4863                 if (bit >= end)
4864                         break;
4865                 next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
4866                 mb_debug(sb, "free preallocated %u/%u in group %u\n",
4867                          (unsigned) ext4_group_first_block_no(sb, group) + bit,
4868                          (unsigned) next - bit, (unsigned) group);
4869                 free += next - bit;
4870
4871                 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
4872                 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
4873                                                     EXT4_C2B(sbi, bit)),
4874                                                next - bit);
4875                 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
4876                 bit = next + 1;
4877         }
4878         if (free != pa->pa_free) {
4879                 ext4_msg(e4b->bd_sb, KERN_CRIT,
4880                          "pa %p: logic %lu, phys. %lu, len %d",
4881                          pa, (unsigned long) pa->pa_lstart,
4882                          (unsigned long) pa->pa_pstart,
4883                          pa->pa_len);
4884                 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
4885                                         free, pa->pa_free);
4886                 /*
4887                  * pa is already deleted so we use the value obtained
4888                  * from the bitmap and continue.
4889                  */
4890         }
4891         atomic_add(free, &sbi->s_mb_discarded);
4892
4893         return 0;
4894 }
4895
4896 static noinline_for_stack int
4897 ext4_mb_release_group_pa(struct ext4_buddy *e4b,
4898                                 struct ext4_prealloc_space *pa)
4899 {
4900         struct super_block *sb = e4b->bd_sb;
4901         ext4_group_t group;
4902         ext4_grpblk_t bit;
4903
4904         trace_ext4_mb_release_group_pa(sb, pa);
4905         BUG_ON(pa->pa_deleted == 0);
4906         ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
4907         if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) {
4908                 ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu",
4909                              e4b->bd_group, group, pa->pa_pstart);
4910                 return 0;
4911         }
4912         mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
4913         atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
4914         trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
4915
4916         return 0;
4917 }
4918
4919 /*
4920  * releases all preallocations in given group
4921  *
4922  * first, we need to decide discard policy:
4923  * - when do we discard
4924  *   1) ENOSPC
4925  * - how many do we discard
4926  *   1) how many requested
4927  */
4928 static noinline_for_stack int
4929 ext4_mb_discard_group_preallocations(struct super_block *sb,
4930                                      ext4_group_t group, int *busy)
4931 {
4932         struct ext4_group_info *grp = ext4_get_group_info(sb, group);
4933         struct buffer_head *bitmap_bh = NULL;
4934         struct ext4_prealloc_space *pa, *tmp;
4935         struct list_head list;
4936         struct ext4_buddy e4b;
4937         int err;
4938         int free = 0;
4939
4940         if (!grp)
4941                 return 0;
4942         mb_debug(sb, "discard preallocation for group %u\n", group);
4943         if (list_empty(&grp->bb_prealloc_list))
4944                 goto out_dbg;
4945
4946         bitmap_bh = ext4_read_block_bitmap(sb, group);
4947         if (IS_ERR(bitmap_bh)) {
4948                 err = PTR_ERR(bitmap_bh);
4949                 ext4_error_err(sb, -err,
4950                                "Error %d reading block bitmap for %u",
4951                                err, group);
4952                 goto out_dbg;
4953         }
4954
4955         err = ext4_mb_load_buddy(sb, group, &e4b);
4956         if (err) {
4957                 ext4_warning(sb, "Error %d loading buddy information for %u",
4958                              err, group);
4959                 put_bh(bitmap_bh);
4960                 goto out_dbg;
4961         }
4962
4963         INIT_LIST_HEAD(&list);
4964         ext4_lock_group(sb, group);
4965         list_for_each_entry_safe(pa, tmp,
4966                                 &grp->bb_prealloc_list, pa_group_list) {
4967                 spin_lock(&pa->pa_lock);
4968                 if (atomic_read(&pa->pa_count)) {
4969                         spin_unlock(&pa->pa_lock);
4970                         *busy = 1;
4971                         continue;
4972                 }
4973                 if (pa->pa_deleted) {
4974                         spin_unlock(&pa->pa_lock);
4975                         continue;
4976                 }
4977
4978                 /* seems this one can be freed ... */
4979                 ext4_mb_mark_pa_deleted(sb, pa);
4980
4981                 if (!free)
4982                         this_cpu_inc(discard_pa_seq);
4983
4984                 /* we can trust pa_free ... */
4985                 free += pa->pa_free;
4986
4987                 spin_unlock(&pa->pa_lock);
4988
4989                 list_del(&pa->pa_group_list);
4990                 list_add(&pa->u.pa_tmp_list, &list);
4991         }
4992
4993         /* now free all selected PAs */
4994         list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
4995
4996                 /* remove from object (inode or locality group) */
4997                 spin_lock(pa->pa_obj_lock);
4998                 list_del_rcu(&pa->pa_inode_list);
4999                 spin_unlock(pa->pa_obj_lock);
5000
5001                 if (pa->pa_type == MB_GROUP_PA)
5002                         ext4_mb_release_group_pa(&e4b, pa);
5003                 else
5004                         ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
5005
5006                 list_del(&pa->u.pa_tmp_list);
5007                 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5008         }
5009
5010         ext4_unlock_group(sb, group);
5011         ext4_mb_unload_buddy(&e4b);
5012         put_bh(bitmap_bh);
5013 out_dbg:
5014         mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
5015                  free, group, grp->bb_free);
5016         return free;
5017 }
5018
5019 /*
5020  * releases all unused preallocated blocks for the given inode
5021  *
5022  * It's important to discard preallocations under i_data_sem
5023  * We don't want another block to be served from the prealloc
5024  * space when we are discarding the inode prealloc space.
5025  *
5026  * FIXME!! Make sure it is valid at all the call sites
5027  */
5028 void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
5029 {
5030         struct ext4_inode_info *ei = EXT4_I(inode);
5031         struct super_block *sb = inode->i_sb;
5032         struct buffer_head *bitmap_bh = NULL;
5033         struct ext4_prealloc_space *pa, *tmp;
5034         ext4_group_t group = 0;
5035         struct list_head list;
5036         struct ext4_buddy e4b;
5037         int err;
5038
5039         if (!S_ISREG(inode->i_mode)) {
5040                 /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
5041                 return;
5042         }
5043
5044         if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
5045                 return;
5046
5047         mb_debug(sb, "discard preallocation for inode %lu\n",
5048                  inode->i_ino);
5049         trace_ext4_discard_preallocations(inode,
5050                         atomic_read(&ei->i_prealloc_active), needed);
5051
5052         INIT_LIST_HEAD(&list);
5053
5054         if (needed == 0)
5055                 needed = UINT_MAX;
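        /*
         * Descriptive note: a caller passing needed == 0 means "discard
         * everything"; UINT_MAX effectively removes the limit on how many
         * PAs the loop below may release.
         */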
5056
5057 repeat:
5058         /* first, collect all pa's in the inode */
5059         spin_lock(&ei->i_prealloc_lock);
5060         while (!list_empty(&ei->i_prealloc_list) && needed) {
5061                 pa = list_entry(ei->i_prealloc_list.prev,
5062                                 struct ext4_prealloc_space, pa_inode_list);
5063                 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
5064                 spin_lock(&pa->pa_lock);
5065                 if (atomic_read(&pa->pa_count)) {
5066                         /* this shouldn't happen often - nobody should
5067                          * use preallocation while we're discarding it */
5068                         spin_unlock(&pa->pa_lock);
5069                         spin_unlock(&ei->i_prealloc_lock);
5070                         ext4_msg(sb, KERN_ERR,
5071                                  "uh-oh! used pa while discarding");
5072                         WARN_ON(1);
5073                         schedule_timeout_uninterruptible(HZ);
5074                         goto repeat;
5075
5076                 }
5077                 if (pa->pa_deleted == 0) {
5078                         ext4_mb_mark_pa_deleted(sb, pa);
5079                         spin_unlock(&pa->pa_lock);
5080                         list_del_rcu(&pa->pa_inode_list);
5081                         list_add(&pa->u.pa_tmp_list, &list);
5082                         needed--;
5083                         continue;
5084                 }
5085
5086                 /* someone is deleting pa right now */
5087                 spin_unlock(&pa->pa_lock);
5088                 spin_unlock(&ei->i_prealloc_lock);
5089
5090                 /* we have to wait here because pa_deleted
5091                  * doesn't mean pa is already unlinked from
5092                  * the list. As we might be called from
5093                  * ->clear_inode(), the inode will get freed
5094                  * and a concurrent thread which is unlinking
5095                  * the pa from the inode's list may access already
5096                  * freed memory, bad-bad-bad */
5097
5098                 /* XXX: if this happens too often, we can
5099                  * add a flag to force wait only in case
5100                  * of ->clear_inode(), but not in case of
5101                  * regular truncate */
5102                 schedule_timeout_uninterruptible(HZ);
5103                 goto repeat;
5104         }
5105         spin_unlock(&ei->i_prealloc_lock);
5106
5107         list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
5108                 BUG_ON(pa->pa_type != MB_INODE_PA);
5109                 group = ext4_get_group_number(sb, pa->pa_pstart);
5110
5111                 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5112                                              GFP_NOFS|__GFP_NOFAIL);
5113                 if (err) {
5114                         ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5115                                        err, group);
5116                         continue;
5117                 }
5118
5119                 bitmap_bh = ext4_read_block_bitmap(sb, group);
5120                 if (IS_ERR(bitmap_bh)) {
5121                         err = PTR_ERR(bitmap_bh);
5122                         ext4_error_err(sb, -err, "Error %d reading block bitmap for %u",
5123                                        err, group);
5124                         ext4_mb_unload_buddy(&e4b);
5125                         continue;
5126                 }
5127
5128                 ext4_lock_group(sb, group);
5129                 list_del(&pa->pa_group_list);
5130                 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
5131                 ext4_unlock_group(sb, group);
5132
5133                 ext4_mb_unload_buddy(&e4b);
5134                 put_bh(bitmap_bh);
5135
5136                 list_del(&pa->u.pa_tmp_list);
5137                 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5138         }
5139 }
5140
5141 static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac)
5142 {
5143         struct ext4_prealloc_space *pa;
5144
5145         BUG_ON(ext4_pspace_cachep == NULL);
5146         pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS);
5147         if (!pa)
5148                 return -ENOMEM;
5149         atomic_set(&pa->pa_count, 1);
5150         ac->ac_pa = pa;
5151         return 0;
5152 }
5153
5154 static void ext4_mb_pa_free(struct ext4_allocation_context *ac)
5155 {
5156         struct ext4_prealloc_space *pa = ac->ac_pa;
5157
5158         BUG_ON(!pa);
5159         ac->ac_pa = NULL;
5160         WARN_ON(!atomic_dec_and_test(&pa->pa_count));
5161         kmem_cache_free(ext4_pspace_cachep, pa);
5162 }
5163
5164 #ifdef CONFIG_EXT4_DEBUG
5165 static inline void ext4_mb_show_pa(struct super_block *sb)
5166 {
5167         ext4_group_t i, ngroups;
5168
5169         if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
5170                 return;
5171
5172         ngroups = ext4_get_groups_count(sb);
5173         mb_debug(sb, "groups: ");
5174         for (i = 0; i < ngroups; i++) {
5175                 struct ext4_group_info *grp = ext4_get_group_info(sb, i);
5176                 struct ext4_prealloc_space *pa;
5177                 ext4_grpblk_t start;
5178                 struct list_head *cur;
5179
5180                 if (!grp)
5181                         continue;
5182                 ext4_lock_group(sb, i);
5183                 list_for_each(cur, &grp->bb_prealloc_list) {
5184                         pa = list_entry(cur, struct ext4_prealloc_space,
5185                                         pa_group_list);
5186                         spin_lock(&pa->pa_lock);
5187                         ext4_get_group_no_and_offset(sb, pa->pa_pstart,
5188                                                      NULL, &start);
5189                         spin_unlock(&pa->pa_lock);
5190                         mb_debug(sb, "PA:%u:%d:%d\n", i, start,
5191                                  pa->pa_len);
5192                 }
5193                 ext4_unlock_group(sb, i);
5194                 mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free,
5195                          grp->bb_fragments);
5196         }
5197 }
5198
5199 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5200 {
5201         struct super_block *sb = ac->ac_sb;
5202
5203         if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
5204                 return;
5205
5206         mb_debug(sb, "Can't allocate:"
5207                         " Allocation context details:");
5208         mb_debug(sb, "status %u flags 0x%x",
5209                         ac->ac_status, ac->ac_flags);
5210         mb_debug(sb, "orig %lu/%lu/%lu@%lu, "
5211                         "goal %lu/%lu/%lu@%lu, "
5212                         "best %lu/%lu/%lu@%lu cr %d",
5213                         (unsigned long)ac->ac_o_ex.fe_group,
5214                         (unsigned long)ac->ac_o_ex.fe_start,
5215                         (unsigned long)ac->ac_o_ex.fe_len,
5216                         (unsigned long)ac->ac_o_ex.fe_logical,
5217                         (unsigned long)ac->ac_g_ex.fe_group,
5218                         (unsigned long)ac->ac_g_ex.fe_start,
5219                         (unsigned long)ac->ac_g_ex.fe_len,
5220                         (unsigned long)ac->ac_g_ex.fe_logical,
5221                         (unsigned long)ac->ac_b_ex.fe_group,
5222                         (unsigned long)ac->ac_b_ex.fe_start,
5223                         (unsigned long)ac->ac_b_ex.fe_len,
5224                         (unsigned long)ac->ac_b_ex.fe_logical,
5225                         (int)ac->ac_criteria);
5226         mb_debug(sb, "%u found", ac->ac_found);
5227         ext4_mb_show_pa(sb);
5228 }
5229 #else
5230 static inline void ext4_mb_show_pa(struct super_block *sb)
5231 {
5233 }
5234 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5235 {
5236         ext4_mb_show_pa(ac->ac_sb);
5238 }
5239 #endif
5240
5241 /*
5242  * We use locality group preallocation for small files. The size of the
5243  * file is determined by the current size or the resulting size after
5244  * allocation, whichever is larger
5245  *
5246  * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
5247  */
5248 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
5249 {
5250         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5251         int bsbits = ac->ac_sb->s_blocksize_bits;
5252         loff_t size, isize;
5253         bool inode_pa_eligible, group_pa_eligible;
5254
5255         if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
5256                 return;
5257
5258         if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
5259                 return;
5260
5261         group_pa_eligible = sbi->s_mb_group_prealloc > 0;
5262         inode_pa_eligible = true;
5263         size = extent_logical_end(sbi, &ac->ac_o_ex);
5264         isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
5265                 >> bsbits;
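        /*
         * Illustrative example (hypothetical numbers, 4KiB blocks): an
         * i_size of 10000 bytes rounds up to
         * isize = (10000 + 4095) >> 12 = 3 blocks, so size and isize are
         * compared in the same block units below.
         */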
5266
5267         /* No point in using inode preallocation for closed files */
5268         if ((size == isize) && !ext4_fs_is_busy(sbi) &&
5269             !inode_is_open_for_write(ac->ac_inode))
5270                 inode_pa_eligible = false;
5271
5272         size = max(size, isize);
5273         /* Don't use group allocation for large files */
5274         if (size > sbi->s_mb_stream_request)
5275                 group_pa_eligible = false;
5276
5277         if (!group_pa_eligible) {
5278                 if (inode_pa_eligible)
5279                         ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
5280                 else
5281                         ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
5282                 return;
5283         }
5284
5285         BUG_ON(ac->ac_lg != NULL);
5286         /*
5287          * Locality group prealloc space is per-CPU. The reason for having
5288          * a per-CPU locality group is to reduce contention between block
5289          * requests from multiple CPUs.
5290          */
5291         ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
5292
5293         /* we're going to use group allocation */
5294         ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
5295
5296         /* serialize all allocations in the group */
5297         mutex_lock(&ac->ac_lg->lg_mutex);
5298 }
5299
5300 static noinline_for_stack int
5301 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
5302                                 struct ext4_allocation_request *ar)
5303 {
5304         struct super_block *sb = ar->inode->i_sb;
5305         struct ext4_sb_info *sbi = EXT4_SB(sb);
5306         struct ext4_super_block *es = sbi->s_es;
5307         ext4_group_t group;
5308         unsigned int len;
5309         ext4_fsblk_t goal;
5310         ext4_grpblk_t block;
5311
5312         /* we can't allocate > group size */
5313         len = ar->len;
5314
5315         /* just a dirty hack to filter out too-big requests */
5316         if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
5317                 len = EXT4_CLUSTERS_PER_GROUP(sb);
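        /*
         * Illustrative figure: with 4KiB blocks and no bigalloc a group
         * holds 32768 clusters (128MiB), so any larger request is clamped
         * to at most one group here.
         */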
5318
5319         /* start searching from the goal */
5320         goal = ar->goal;
5321         if (goal < le32_to_cpu(es->s_first_data_block) ||
5322                         goal >= ext4_blocks_count(es))
5323                 goal = le32_to_cpu(es->s_first_data_block);
5324         ext4_get_group_no_and_offset(sb, goal, &group, &block);
5325
5326         /* set up allocation goals */
5327         ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
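        /*
         * Illustrative example (hypothetical bigalloc setup):
         * EXT4_LBLK_CMASK() rounds the logical block down to a cluster
         * boundary, so with a cluster ratio of 16 a logical block of 37
         * starts the search at block 32; without bigalloc it is a no-op.
         */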
5328         ac->ac_status = AC_STATUS_CONTINUE;
5329         ac->ac_sb = sb;
5330         ac->ac_inode = ar->inode;
5331         ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
5332         ac->ac_o_ex.fe_group = group;
5333         ac->ac_o_ex.fe_start = block;
5334         ac->ac_o_ex.fe_len = len;
5335         ac->ac_g_ex = ac->ac_o_ex;
5336         ac->ac_flags = ar->flags;
5337
5338         /* we have to define context: we'll work with a file or
5339          * locality group. this is a policy, actually */
5340         ext4_mb_group_or_file(ac);
5341
5342         mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, "
5343                         "left: %u/%u, right %u/%u to %swritable\n",
5344                         (unsigned) ar->len, (unsigned) ar->logical,
5345                         (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
5346                         (unsigned) ar->lleft, (unsigned) ar->pleft,
5347                         (unsigned) ar->lright, (unsigned) ar->pright,
5348                         inode_is_open_for_write(ar->inode) ? "" : "non-");
5349         return 0;
5351 }
5352
5353 static noinline_for_stack void
5354 ext4_mb_discard_lg_preallocations(struct super_block *sb,
5355                                         struct ext4_locality_group *lg,
5356                                         int order, int total_entries)
5357 {
5358         ext4_group_t group = 0;
5359         struct ext4_buddy e4b;
5360         struct list_head discard_list;
5361         struct ext4_prealloc_space *pa, *tmp;
5362
5363         mb_debug(sb, "discard locality group preallocation\n");
5364
5365         INIT_LIST_HEAD(&discard_list);
5366
5367         spin_lock(&lg->lg_prealloc_lock);
5368         list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
5369                                 pa_inode_list,
5370                                 lockdep_is_held(&lg->lg_prealloc_lock)) {
5371                 spin_lock(&pa->pa_lock);
5372                 if (atomic_read(&pa->pa_count)) {
5373                         /*
5374                          * This is the pa we just used
5375                          * for block allocation, so
5376                          * don't free it.
5377                          */
5378                         spin_unlock(&pa->pa_lock);
5379                         continue;
5380                 }
5381                 if (pa->pa_deleted) {
5382                         spin_unlock(&pa->pa_lock);
5383                         continue;
5384                 }
5385                 /* only lg prealloc space */
5386                 BUG_ON(pa->pa_type != MB_GROUP_PA);
5387
5388                 /* seems this one can be freed ... */
5389                 ext4_mb_mark_pa_deleted(sb, pa);
5390                 spin_unlock(&pa->pa_lock);
5391
5392                 list_del_rcu(&pa->pa_inode_list);
5393                 list_add(&pa->u.pa_tmp_list, &discard_list);
5394
5395                 total_entries--;
5396                 if (total_entries <= 5) {
5397                         /*
5398                          * we want to keep only 5 entries,
5399                          * allowing the list to grow to 8. This
5400                          * makes sure we don't call discard
5401                          * again soon for this list.
5402                          */
5403                         break;
5404                 }
5405         }
5406         spin_unlock(&lg->lg_prealloc_lock);
5407
5408         list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
5409                 int err;
5410
5411                 group = ext4_get_group_number(sb, pa->pa_pstart);
5412                 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5413                                              GFP_NOFS|__GFP_NOFAIL);
5414                 if (err) {
5415                         ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5416                                        err, group);
5417                         continue;
5418                 }
5419                 ext4_lock_group(sb, group);
5420                 list_del(&pa->pa_group_list);
5421                 ext4_mb_release_group_pa(&e4b, pa);
5422                 ext4_unlock_group(sb, group);
5423
5424                 ext4_mb_unload_buddy(&e4b);
5425                 list_del(&pa->u.pa_tmp_list);
5426                 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5427         }
5428 }
5429
5430 /*
5431  * We have incremented pa_count. So it cannot be freed at this
5432  * point. Also we hold lg_mutex. So no parallel allocation is
5433  * possible from this lg. That means pa_free cannot be updated.
5434  *
5435  * A parallel ext4_mb_discard_group_preallocations is possible,
5436  * which can cause the lg_prealloc_list to be updated.
5437  */
5438
5439 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
5440 {
5441         int order, added = 0, lg_prealloc_count = 1;
5442         struct super_block *sb = ac->ac_sb;
5443         struct ext4_locality_group *lg = ac->ac_lg;
5444         struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
5445
5446         order = fls(pa->pa_free) - 1;
5447         if (order > PREALLOC_TB_SIZE - 1)
5448                 /* The max size of hash table is PREALLOC_TB_SIZE */
5449                 order = PREALLOC_TB_SIZE - 1;
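        /*
         * Worked example (numbers assumed): pa_free == 300 clusters gives
         * fls(300) == 9 and thus order == 8; any pa_free >= 512 would give
         * fls() >= 10 and be clamped to the last bucket,
         * PREALLOC_TB_SIZE - 1.
         */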
5450         /* Add the prealloc space to lg */
5451         spin_lock(&lg->lg_prealloc_lock);
5452         list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
5453                                 pa_inode_list,
5454                                 lockdep_is_held(&lg->lg_prealloc_lock)) {
5455                 spin_lock(&tmp_pa->pa_lock);
5456                 if (tmp_pa->pa_deleted) {
5457                         spin_unlock(&tmp_pa->pa_lock);
5458                         continue;
5459                 }
5460                 if (!added && pa->pa_free < tmp_pa->pa_free) {
5461                         /* insert before tmp_pa to keep the list sorted by pa_free */
5462                         list_add_tail_rcu(&pa->pa_inode_list,
5463                                                 &tmp_pa->pa_inode_list);
5464                         added = 1;
5465                         /*
5466                          * keep scanning: we want to count the
5467                          * total number of entries in the list
5468                          */
5469                 }
5470                 spin_unlock(&tmp_pa->pa_lock);
5471                 lg_prealloc_count++;
5472         }
5473         if (!added)
5474                 list_add_tail_rcu(&pa->pa_inode_list,
5475                                         &lg->lg_prealloc_list[order]);
5476         spin_unlock(&lg->lg_prealloc_lock);
5477
5478         /* Now trim the list to be not more than 8 elements */
5479         if (lg_prealloc_count > 8)
5480                 ext4_mb_discard_lg_preallocations(sb, lg,
5481                                                   order, lg_prealloc_count);
5485 }
5486
5487 /*
5488  * if per-inode prealloc list is too long, trim some PA
5489  */
5490 static void ext4_mb_trim_inode_pa(struct inode *inode)
5491 {
5492         struct ext4_inode_info *ei = EXT4_I(inode);
5493         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5494         int count, delta;
5495
5496         count = atomic_read(&ei->i_prealloc_active);
5497         delta = (sbi->s_mb_max_inode_prealloc >> 2) + 1;
5498         if (count > sbi->s_mb_max_inode_prealloc + delta) {
5499                 count -= sbi->s_mb_max_inode_prealloc;
5500                 ext4_discard_preallocations(inode, count);
5501         }
5502 }
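
/*
 * Worked example (assuming the default s_mb_max_inode_prealloc of 512):
 * delta = (512 >> 2) + 1 = 129, so trimming starts only once the inode
 * holds more than 641 active PAs, at which point count - 512 of the
 * least recently used ones are discarded.
 */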
5503
5504 /*
5505  * release all resources used in allocation
5506  */
5507 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
5508 {
5509         struct inode *inode = ac->ac_inode;
5510         struct ext4_inode_info *ei = EXT4_I(inode);
5511         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5512         struct ext4_prealloc_space *pa = ac->ac_pa;
5513         if (pa) {
5514                 if (pa->pa_type == MB_GROUP_PA) {
5515                         /* see comment in ext4_mb_use_group_pa() */
5516                         spin_lock(&pa->pa_lock);
5517                         pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
5518                         pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
5519                         pa->pa_free -= ac->ac_b_ex.fe_len;
5520                         pa->pa_len -= ac->ac_b_ex.fe_len;
5521                         spin_unlock(&pa->pa_lock);
5522
5523                         /*
5524                          * We want to add the pa to the right bucket.
5525                          * Remove it from the list and, while adding,
5526                          * make sure the list to which we are adding
5527                          * doesn't grow too big.
5528                          */
5529                         if (likely(pa->pa_free)) {
5530                                 spin_lock(pa->pa_obj_lock);
5531                                 list_del_rcu(&pa->pa_inode_list);
5532                                 spin_unlock(pa->pa_obj_lock);
5533                                 ext4_mb_add_n_trim(ac);
5534                         }
5535                 }
5536
5537                 if (pa->pa_type == MB_INODE_PA) {
5538                         /*
5539                          * treat the per-inode prealloc list as an LRU list, then try
5540                          * to trim the least recently used PA.
5541                          */
5542                         spin_lock(pa->pa_obj_lock);
5543                         list_move(&pa->pa_inode_list, &ei->i_prealloc_list);
5544                         spin_unlock(pa->pa_obj_lock);
5545                 }
5546
5547                 ext4_mb_put_pa(ac, ac->ac_sb, pa);
5548         }
5549         if (ac->ac_bitmap_page)
5550                 put_page(ac->ac_bitmap_page);
5551         if (ac->ac_buddy_page)
5552                 put_page(ac->ac_buddy_page);
5553         if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
5554                 mutex_unlock(&ac->ac_lg->lg_mutex);
5555         ext4_mb_collect_stats(ac);
5556         ext4_mb_trim_inode_pa(inode);
5557         return 0;
5558 }
5559
5560 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
5561 {
5562         ext4_group_t i, ngroups = ext4_get_groups_count(sb);
5563         int ret;
5564         int freed = 0, busy = 0;
5565         int retry = 0;
5566
5567         trace_ext4_mb_discard_preallocations(sb, needed);
5568
5569         if (needed == 0)
5570                 needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
5571  repeat:
5572         for (i = 0; i < ngroups && needed > 0; i++) {
5573                 ret = ext4_mb_discard_group_preallocations(sb, i, &busy);
5574                 freed += ret;
5575                 needed -= ret;
5576                 cond_resched();
5577         }
5578
5579         if (needed > 0 && busy && ++retry < 3) {
5580                 busy = 0;
5581                 goto repeat;
5582         }
5583
5584         return freed;
5585 }
5586
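/*
 * A note on the retry heuristic below: if the discard pass freed anything
 * we always retry; otherwise we retry once more with EXT4_MB_STRICT_CHECK
 * set, and after that only when the global discard PA sequence has
 * advanced, i.e. some other context discarded preallocations while we
 * were scanning.
 */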
5587 static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb,
5588                         struct ext4_allocation_context *ac, u64 *seq)
5589 {
5590         int freed;
5591         u64 seq_retry = 0;
5592         bool ret = false;
5593
5594         freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
5595         if (freed) {
5596                 ret = true;
5597                 goto out_dbg;
5598         }
5599         seq_retry = ext4_get_discard_pa_seq_sum();
5600         if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) {
5601                 ac->ac_flags |= EXT4_MB_STRICT_CHECK;
5602                 *seq = seq_retry;
5603                 ret = true;
5604         }
5605
5606 out_dbg:
5607         mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? "yes" : "no");
5608         return ret;
5609 }
5610
5611 static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
5612                                 struct ext4_allocation_request *ar, int *errp);
5613
5614 /*
5615  * Main entry point into mballoc to allocate blocks
5616  * it tries to use preallocation first, then falls back
5617  * to usual allocation
5618  */
5619 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
5620                                 struct ext4_allocation_request *ar, int *errp)
5621 {
5622         struct ext4_allocation_context *ac = NULL;
5623         struct ext4_sb_info *sbi;
5624         struct super_block *sb;
5625         ext4_fsblk_t block = 0;
5626         unsigned int inquota = 0;
5627         unsigned int reserv_clstrs = 0;
5628         int retries = 0;
5629         u64 seq;
5630
5631         might_sleep();
5632         sb = ar->inode->i_sb;
5633         sbi = EXT4_SB(sb);
5634
5635         trace_ext4_request_blocks(ar);
5636         if (sbi->s_mount_state & EXT4_FC_REPLAY)
5637                 return ext4_mb_new_blocks_simple(handle, ar, errp);
5638
5639         /* Allow use of the superuser reservation for the quota file */
5640         if (ext4_is_quota_file(ar->inode))
5641                 ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
5642
5643         if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
5644                 /* Without delayed allocation we need to verify
5645                  * there are enough free blocks to do block allocation
5646                  * and that the allocation doesn't exceed the quota limits.
5647                  */
5648                 while (ar->len &&
5649                         ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
5650
5651                         /* let others free the space */
5652                         cond_resched();
5653                         ar->len = ar->len >> 1;
5654                 }
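                /*
                 * Example (numbers assumed): a request for 64 clusters
                 * that cannot be claimed shrinks 64 -> 32 -> 16 -> ...,
                 * and we give up with ENOSPC below only once not even a
                 * single cluster can be claimed.
                 */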
5655                 if (!ar->len) {
5656                         ext4_mb_show_pa(sb);
5657                         *errp = -ENOSPC;
5658                         return 0;
5659                 }
5660                 reserv_clstrs = ar->len;
5661                 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
5662                         dquot_alloc_block_nofail(ar->inode,
5663                                                  EXT4_C2B(sbi, ar->len));
5664                 } else {
5665                         while (ar->len &&
5666                                 dquot_alloc_block(ar->inode,
5667                                                   EXT4_C2B(sbi, ar->len))) {
5668
5669                                 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
5670                                 ar->len--;
5671                         }
5672                 }
5673                 inquota = ar->len;
5674                 if (ar->len == 0) {
5675                         *errp = -EDQUOT;
5676                         goto out;
5677                 }
5678         }
5679
5680         ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
5681         if (!ac) {
5682                 ar->len = 0;
5683                 *errp = -ENOMEM;
5684                 goto out;
5685         }
5686
5687         *errp = ext4_mb_initialize_context(ac, ar);
5688         if (*errp) {
5689                 ar->len = 0;
5690                 goto out;
5691         }
5692
5693         ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
5694         seq = this_cpu_read(discard_pa_seq);
5695         if (!ext4_mb_use_preallocated(ac)) {
5696                 ac->ac_op = EXT4_MB_HISTORY_ALLOC;
5697                 ext4_mb_normalize_request(ac, ar);
5698
5699                 *errp = ext4_mb_pa_alloc(ac);
5700                 if (*errp)
5701                         goto errout;
5702 repeat:
5703                 /* allocate space in core */
5704                 *errp = ext4_mb_regular_allocator(ac);
5705                 /*
5706                  * pa allocated above is added to grp->bb_prealloc_list only
5707                  * when we were able to allocate some blocks, i.e. when
5708                  * ac->ac_status == AC_STATUS_FOUND.
5709                  * An error from above means ac->ac_status != AC_STATUS_FOUND,
5710                  * so we have to free this pa right here.
5711                  */
5712                 if (*errp) {
5713                         ext4_mb_pa_free(ac);
5714                         ext4_discard_allocated_blocks(ac);
5715                         goto errout;
5716                 }
5717                 if (ac->ac_status == AC_STATUS_FOUND &&
5718                         ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len)
5719                         ext4_mb_pa_free(ac);
5720         }
5721         if (likely(ac->ac_status == AC_STATUS_FOUND)) {
5722                 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
5723                 if (*errp) {
5724                         ext4_discard_allocated_blocks(ac);
5725                         goto errout;
5726                 } else {
5727                         block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5728                         ar->len = ac->ac_b_ex.fe_len;
5729                 }
5730         } else {
5731                 if (++retries < 3 &&
5732                     ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
5733                         goto repeat;
5734                 /*
5735                  * If block allocation fails then the pa allocated above
5736                  * needs to be freed right here.
5737                  */
5738                 ext4_mb_pa_free(ac);
5739                 *errp = -ENOSPC;
5740         }
5741
5742 errout:
5743         if (*errp) {
5744                 ac->ac_b_ex.fe_len = 0;
5745                 ar->len = 0;
5746                 ext4_mb_show_ac(ac);
5747         }
5748         ext4_mb_release_context(ac);
5749 out:
5750         if (ac)
5751                 kmem_cache_free(ext4_ac_cachep, ac);
5752         if (inquota && ar->len < inquota)
5753                 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
5754         if (!ar->len) {
5755                 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
5756                         /* release all the reserved blocks if non delalloc */
5757                         percpu_counter_sub(&sbi->s_dirtyclusters_counter,
5758                                                 reserv_clstrs);
5759         }
5760
5761         trace_ext4_allocate_blocks(ar, (unsigned long long)block);
5762
5763         return block;
5764 }
5765
5766 /*
5767  * We can merge two free data extents only if the physical blocks
5768  * are contiguous, AND the extents were freed by the same transaction,
5769  * AND the blocks are associated with the same group.
5770  */
5771 static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi,
5772                                         struct ext4_free_data *entry,
5773                                         struct ext4_free_data *new_entry,
5774                                         struct rb_root *entry_rb_root)
5775 {
5776         if ((entry->efd_tid != new_entry->efd_tid) ||
5777             (entry->efd_group != new_entry->efd_group))
5778                 return;
5779         if (entry->efd_start_cluster + entry->efd_count ==
5780             new_entry->efd_start_cluster) {
5781                 new_entry->efd_start_cluster = entry->efd_start_cluster;
5782                 new_entry->efd_count += entry->efd_count;
5783         } else if (new_entry->efd_start_cluster + new_entry->efd_count ==
5784                    entry->efd_start_cluster) {
5785                 new_entry->efd_count += entry->efd_count;
5786         } else
5787                 return;
5788         spin_lock(&sbi->s_md_lock);
5789         list_del(&entry->efd_list);
5790         spin_unlock(&sbi->s_md_lock);
5791         rb_erase(&entry->efd_node, entry_rb_root);
5792         kmem_cache_free(ext4_free_data_cachep, entry);
5793 }
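
/*
 * Worked example (clusters assumed): with new_entry covering clusters
 * [20, 30) and a neighbouring entry covering [10, 20) in the same group
 * and transaction, the first branch above grows new_entry to [10, 30)
 * and frees the old entry; an entry covering [30, 40) would instead be
 * absorbed by the second branch, giving [20, 40). Non-adjacent entries
 * are left untouched.
 */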
5794
5795 static noinline_for_stack int
5796 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
5797                       struct ext4_free_data *new_entry)
5798 {
5799         ext4_group_t group = e4b->bd_group;
5800         ext4_grpblk_t cluster;
5801         ext4_grpblk_t clusters = new_entry->efd_count;
5802         struct ext4_free_data *entry;
5803         struct ext4_group_info *db = e4b->bd_info;
5804         struct super_block *sb = e4b->bd_sb;
5805         struct ext4_sb_info *sbi = EXT4_SB(sb);
5806         struct rb_node **n = &db->bb_free_root.rb_node, *node;
5807         struct rb_node *parent = NULL, *new_node;
5808
5809         BUG_ON(!ext4_handle_valid(handle));
5810         BUG_ON(e4b->bd_bitmap_page == NULL);
5811         BUG_ON(e4b->bd_buddy_page == NULL);
5812
5813         new_node = &new_entry->efd_node;
5814         cluster = new_entry->efd_start_cluster;
5815
5816         if (!*n) {
5817                 /* first free block extent. We need to
5818                  * protect the buddy cache from being freed,
5819                  * otherwise we'll refresh it from the
5820                  * on-disk bitmap and lose not-yet-available
5821                  * blocks */
5822                 get_page(e4b->bd_buddy_page);
5823                 get_page(e4b->bd_bitmap_page);
5824         }
5825         while (*n) {
5826                 parent = *n;
5827                 entry = rb_entry(parent, struct ext4_free_data, efd_node);
5828                 if (cluster < entry->efd_start_cluster)
5829                         n = &(*n)->rb_left;
5830                 else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
5831                         n = &(*n)->rb_right;
5832                 else {
5833                         ext4_grp_locked_error(sb, group, 0,
5834                                 ext4_group_first_block_no(sb, group) +
5835                                 EXT4_C2B(sbi, cluster),
5836                                 "Block already on to-be-freed list");
5837                         kmem_cache_free(ext4_free_data_cachep, new_entry);
5838                         return 0;
5839                 }
5840         }
5841
5842         rb_link_node(new_node, parent, n);
5843         rb_insert_color(new_node, &db->bb_free_root);
5844
5845         /* Now see whether the extent can be merged to the left and right */
5846         node = rb_prev(new_node);
5847         if (node) {
5848                 entry = rb_entry(node, struct ext4_free_data, efd_node);
5849                 ext4_try_merge_freed_extent(sbi, entry, new_entry,
5850                                             &(db->bb_free_root));
5851         }
5852
5853         node = rb_next(new_node);
5854         if (node) {
5855                 entry = rb_entry(node, struct ext4_free_data, efd_node);
5856                 ext4_try_merge_freed_extent(sbi, entry, new_entry,
5857                                             &(db->bb_free_root));
5858         }
5859
5860         spin_lock(&sbi->s_md_lock);
5861         list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list);
5862         sbi->s_mb_free_pending += clusters;
5863         spin_unlock(&sbi->s_md_lock);
5864         return 0;
5865 }
5866
5867 /*
5868  * Simple allocator for Ext4 fast commit replay path. It searches for blocks
5869  * linearly starting at the goal block and also excludes the blocks which
5870  * are going to be in use after fast commit replay.
5871  */
5872 static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
5873                                 struct ext4_allocation_request *ar, int *errp)
5874 {
5875         struct buffer_head *bitmap_bh;
5876         struct super_block *sb = ar->inode->i_sb;
5877         ext4_group_t group;
5878         ext4_grpblk_t blkoff;
5879         ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
5880         ext4_grpblk_t i = 0;
5881         ext4_fsblk_t goal, block;
5882         struct ext4_super_block *es = EXT4_SB(sb)->s_es;
5883
5884         goal = ar->goal;
5885         if (goal < le32_to_cpu(es->s_first_data_block) ||
5886                         goal >= ext4_blocks_count(es))
5887                 goal = le32_to_cpu(es->s_first_data_block);
5888
5889         ar->len = 0;
5890         ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
5891         for (; group < ext4_get_groups_count(sb); group++) {
5892                 bitmap_bh = ext4_read_block_bitmap(sb, group);
5893                 if (IS_ERR(bitmap_bh)) {
5894                         *errp = PTR_ERR(bitmap_bh);
5895                         pr_warn("Failed to read block bitmap\n");
5896                         return 0;
5897                 }
5898
5899                 ext4_get_group_no_and_offset(sb,
5900                         max(ext4_group_first_block_no(sb, group), goal),
5901                         NULL, &blkoff);
5902                 while (1) {
5903                         i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
5904                                                 blkoff);
5905                         if (i >= max)
5906                                 break;
5907                         if (ext4_fc_replay_check_excluded(sb,
5908                                 ext4_group_first_block_no(sb, group) + i)) {
5909                                 blkoff = i + 1;
5910                         } else
5911                                 break;
5912                 }
5913                 brelse(bitmap_bh);
5914                 if (i < max)
5915                         break;
5916         }
5917
5918         if (group >= ext4_get_groups_count(sb) || i >= max) {
5919                 *errp = -ENOSPC;
5920                 return 0;
5921         }
5922
5923         block = ext4_group_first_block_no(sb, group) + i;
5924         ext4_mb_mark_bb(sb, block, 1, 1);
5925         ar->len = 1;
5926
5927         return block;
5928 }
5929
5930 static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
5931                                         unsigned long count)
5932 {
5933         struct buffer_head *bitmap_bh;
5934         struct super_block *sb = inode->i_sb;
5935         struct ext4_group_desc *gdp;
5936         struct buffer_head *gdp_bh;
5937         ext4_group_t group;
5938         ext4_grpblk_t blkoff;
5939         int already_freed = 0, err, i;
5940
5941         ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
5942         bitmap_bh = ext4_read_block_bitmap(sb, group);
5943         if (IS_ERR(bitmap_bh)) {
5944                 err = PTR_ERR(bitmap_bh);
5945                 pr_warn("Failed to read block bitmap\n");
5946                 return;
5947         }
5948         gdp = ext4_get_group_desc(sb, group, &gdp_bh);
5949         if (!gdp) {
5950                 brelse(bitmap_bh);
5951                 return;
5952         }
5951
5952         for (i = 0; i < count; i++) {
5953                 if (!mb_test_bit(blkoff + i, bitmap_bh->b_data))
5954                         already_freed++;
5955         }
5956         mb_clear_bits(bitmap_bh->b_data, blkoff, count);
5957         err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
5958         if (err) {
5959                 brelse(bitmap_bh);
5960                 return;
5961         }
5960         ext4_free_group_clusters_set(
5961                 sb, gdp, ext4_free_group_clusters(sb, gdp) +
5962                 count - already_freed);
5963         ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
5964         ext4_group_desc_csum_set(sb, group, gdp);
5965         ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
5966         sync_dirty_buffer(bitmap_bh);
5967         sync_dirty_buffer(gdp_bh);
5968         brelse(bitmap_bh);
5969 }
5970
5971 /**
5972  * ext4_mb_clear_bb() -- helper function for freeing blocks.
5973  *                      Used by ext4_free_blocks()
5974  * @handle:             handle for this transaction
5975  * @inode:              inode
5976  * @block:              starting physical block to be freed
5977  * @count:              number of blocks to be freed
5978  * @flags:              flags used by ext4_free_blocks
5979  */
5980 static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
5981                                ext4_fsblk_t block, unsigned long count,
5982                                int flags)
5983 {
5984         struct buffer_head *bitmap_bh = NULL;
5985         struct super_block *sb = inode->i_sb;
5986         struct ext4_group_desc *gdp;
5987         struct ext4_group_info *grp;
5988         unsigned int overflow;
5989         ext4_grpblk_t bit;
5990         struct buffer_head *gd_bh;
5991         ext4_group_t block_group;
5992         struct ext4_sb_info *sbi;
5993         struct ext4_buddy e4b;
5994         unsigned int count_clusters;
5995         int err = 0;
5996         int ret;
5997
5998         sbi = EXT4_SB(sb);
5999
6000         if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6001             !ext4_inode_block_valid(inode, block, count)) {
6002                 ext4_error(sb, "Freeing blocks in system zone - "
6003                            "Block = %llu, count = %lu", block, count);
6004                 /* err = 0. ext4_std_error should be a no op */
6005                 goto error_return;
6006         }
6007         flags |= EXT4_FREE_BLOCKS_VALIDATED;
6008
6009 do_more:
6010         overflow = 0;
6011         ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
6012
6013         grp = ext4_get_group_info(sb, block_group);
6014         if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
6015                 return;
6016
6017         /*
6018          * Check to see if we are freeing blocks across a group
6019          * boundary.
6020          */
6021         if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
6022                 overflow = EXT4_C2B(sbi, bit) + count -
6023                         EXT4_BLOCKS_PER_GROUP(sb);
6024                 count -= overflow;
6025                 /* The range changed so it's no longer validated */
6026                 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6027         }
6028         count_clusters = EXT4_NUM_B2C(sbi, count);
6029         bitmap_bh = ext4_read_block_bitmap(sb, block_group);
6030         if (IS_ERR(bitmap_bh)) {
6031                 err = PTR_ERR(bitmap_bh);
6032                 bitmap_bh = NULL;
6033                 goto error_return;
6034         }
6035         gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
6036         if (!gdp) {
6037                 err = -EIO;
6038                 goto error_return;
6039         }
6040
6041         if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6042             !ext4_inode_block_valid(inode, block, count)) {
6043                 ext4_error(sb, "Freeing blocks in system zone - "
6044                            "Block = %llu, count = %lu", block, count);
6045                 /* err = 0. ext4_std_error should be a no op */
6046                 goto error_return;
6047         }
6048
6049         BUFFER_TRACE(bitmap_bh, "getting write access");
6050         err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
6051                                             EXT4_JTR_NONE);
6052         if (err)
6053                 goto error_return;
6054
6055         /*
6056          * We are about to modify some metadata.  Call the journal APIs
6057          * to unshare ->b_data if a currently-committing transaction is
6058          * using it
6059          */
6060         BUFFER_TRACE(gd_bh, "get_write_access");
6061         err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE);
6062         if (err)
6063                 goto error_return;
6064 #ifdef AGGRESSIVE_CHECK
6065         {
6066                 int i;
6067                 for (i = 0; i < count_clusters; i++)
6068                         BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
6069         }
6070 #endif
6071         trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
6072
6073         /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */
6074         err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
6075                                      GFP_NOFS|__GFP_NOFAIL);
6076         if (err)
6077                 goto error_return;
6078
6079         /*
6080          * We need to make sure we don't reuse the freed block until after the
6081          * transaction is committed. We make an exception if the inode is to be
6082          * written in writeback mode since writeback mode has weak data
6083          * consistency guarantees.
6084          */
6085         if (ext4_handle_valid(handle) &&
6086             ((flags & EXT4_FREE_BLOCKS_METADATA) ||
6087              !ext4_should_writeback_data(inode))) {
6088                 struct ext4_free_data *new_entry;
6089                 /*
6090                  * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
6091                  * to fail.
6092                  */
6093                 new_entry = kmem_cache_alloc(ext4_free_data_cachep,
6094                                 GFP_NOFS|__GFP_NOFAIL);
6095                 new_entry->efd_start_cluster = bit;
6096                 new_entry->efd_group = block_group;
6097                 new_entry->efd_count = count_clusters;
6098                 new_entry->efd_tid = handle->h_transaction->t_tid;
6099
6100                 ext4_lock_group(sb, block_group);
6101                 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
6102                 ext4_mb_free_metadata(handle, &e4b, new_entry);
6103         } else {
6104                 /* need to update group_info->bb_free and bitmap
6105                  * with the group lock held. generate_buddy looks at
6106                  * them with the group lock held
6107                  */
6108                 if (test_opt(sb, DISCARD)) {
6109                         err = ext4_issue_discard(sb, block_group, bit,
6110                                                  count_clusters, NULL);
6111                         if (err && err != -EOPNOTSUPP)
6112                                 ext4_msg(sb, KERN_WARNING, "discard request in"
6113                                          " group:%u block:%d count:%lu failed"
6114                                          " with %d", block_group, bit, count,
6115                                          err);
6116                 } else
6117                         EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
6118
6119                 ext4_lock_group(sb, block_group);
6120                 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
6121                 mb_free_blocks(inode, &e4b, bit, count_clusters);
6122         }
6123
6124         ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
6125         ext4_free_group_clusters_set(sb, gdp, ret);
6126         ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
6127         ext4_group_desc_csum_set(sb, block_group, gdp);
6128         ext4_unlock_group(sb, block_group);
6129
6130         if (sbi->s_log_groups_per_flex) {
6131                 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
6132                 atomic64_add(count_clusters,
6133                              &sbi_array_rcu_deref(sbi, s_flex_groups,
6134                                                   flex_group)->free_clusters);
6135         }
6136
6137         /*
6138          * on a bigalloc file system, defer the s_freeclusters_counter
6139          * update to the caller (ext4_remove_space and friends) so they
6140          * can determine if a cluster freed here should be rereserved
6141          */
6142         if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) {
6143                 if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
6144                         dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
6145                 percpu_counter_add(&sbi->s_freeclusters_counter,
6146                                    count_clusters);
6147         }
6148
6149         ext4_mb_unload_buddy(&e4b);
6150
6151         /* We dirtied the bitmap block */
6152         BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
6153         err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
6154
6155         /* And the group descriptor block */
6156         BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
6157         ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
6158         if (!err)
6159                 err = ret;
6160
6161         if (overflow && !err) {
6162                 block += count;
6163                 count = overflow;
6164                 put_bh(bitmap_bh);
6165                 /* The range changed so it's no longer validated */
6166                 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6167                 goto do_more;
6168         }
6169 error_return:
6170         brelse(bitmap_bh);
6171         ext4_std_error(sb, err);
6172         return;
6173 }
6174
6175 /**
6176  * ext4_free_blocks() -- Free given blocks and update quota
6177  * @handle:             handle for this transaction
6178  * @inode:              inode
6179  * @bh:                 optional buffer of the block to be freed
6180  * @block:              starting physical block to be freed
6181  * @count:              number of blocks to be freed
6182  * @flags:              flags used by ext4_free_blocks
6183  */
6184 void ext4_free_blocks(handle_t *handle, struct inode *inode,
6185                       struct buffer_head *bh, ext4_fsblk_t block,
6186                       unsigned long count, int flags)
6187 {
6188         struct super_block *sb = inode->i_sb;
6189         unsigned int overflow;
6190         struct ext4_sb_info *sbi;
6191
6192         sbi = EXT4_SB(sb);
6193
6194         if (bh) {
6195                 if (block)
6196                         BUG_ON(block != bh->b_blocknr);
6197                 else
6198                         block = bh->b_blocknr;
6199         }
6200
6201         if (sbi->s_mount_state & EXT4_FC_REPLAY) {
6202                 ext4_free_blocks_simple(inode, block, EXT4_NUM_B2C(sbi, count));
6203                 return;
6204         }
6205
6206         might_sleep();
6207
6208         if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6209             !ext4_inode_block_valid(inode, block, count)) {
6210                 ext4_error(sb, "Freeing blocks not in datazone - "
6211                            "block = %llu, count = %lu", block, count);
6212                 return;
6213         }
6214         flags |= EXT4_FREE_BLOCKS_VALIDATED;
6215
6216         ext4_debug("freeing block %llu\n", block);
6217         trace_ext4_free_blocks(inode, block, count, flags);
6218
6219         if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
6220                 BUG_ON(count > 1);
6221
6222                 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
6223                             inode, bh, block);
6224         }
6225
6226         /*
6227          * If the extent to be freed does not begin on a cluster
6228          * boundary, we need to deal with partial clusters at the
6229          * beginning and end of the extent.  Normally we will free
6230          * blocks at the beginning or the end unless we are explicitly
6231          * requested to avoid doing so.
6232          */
6233         overflow = EXT4_PBLK_COFF(sbi, block);
6234         if (overflow) {
6235                 if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
6236                         overflow = sbi->s_cluster_ratio - overflow;
6237                         block += overflow;
6238                         if (count > overflow)
6239                                 count -= overflow;
6240                         else
6241                                 return;
6242                 } else {
6243                         block -= overflow;
6244                         count += overflow;
6245                 }
6246                 /* The range changed so it's no longer validated */
6247                 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6248         }
6249         overflow = EXT4_LBLK_COFF(sbi, count);
6250         if (overflow) {
6251                 if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
6252                         if (count > overflow)
6253                                 count -= overflow;
6254                         else
6255                                 return;
6256                 } else
6257                         count += sbi->s_cluster_ratio - overflow;
6258                 /* The range changed so it's no longer validated */
6259                 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6260         }
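
        /*
         * Worked example (bigalloc layout assumed, s_cluster_ratio == 16):
         * freeing block 35, count 10 first rounds the head down,
         * overflow = 35 % 16 = 3, giving block 32, count 13; the tail is
         * then rounded up by 16 - (13 % 16) = 3, giving count 16, i.e.
         * exactly one whole cluster starting at block 32.
         */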
6261
6262         if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
6263                 int i;
6264                 int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;
6265
6266                 for (i = 0; i < count; i++) {
6267                         cond_resched();
6268                         if (is_metadata)
6269                                 bh = sb_find_get_block(inode->i_sb, block + i);
6270                         ext4_forget(handle, is_metadata, inode, bh, block + i);
6271                 }
6272         }
6273
6274         ext4_mb_clear_bb(handle, inode, block, count, flags);
6276 }
6277
6278 /**
6279  * ext4_group_add_blocks() -- Add given blocks to an existing group
6280  * @handle:                     handle to this transaction
6281  * @sb:                         super block
6282  * @block:                      start physical block to add to the block group
6283  * @count:                      number of blocks to free
6284  *
6285  * This marks the blocks as free in the bitmap and buddy.
6286  */
6287 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
6288                          ext4_fsblk_t block, unsigned long count)
6289 {
6290         struct buffer_head *bitmap_bh = NULL;
6291         struct buffer_head *gd_bh;
6292         ext4_group_t block_group;
6293         ext4_grpblk_t bit;
6294         unsigned int i;
6295         struct ext4_group_desc *desc;
6296         struct ext4_sb_info *sbi = EXT4_SB(sb);
6297         struct ext4_buddy e4b;
6298         int err = 0, ret, free_clusters_count;
6299         ext4_grpblk_t clusters_freed;
6300         ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block);
6301         ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1);
6302         unsigned long cluster_count = last_cluster - first_cluster + 1;
6303
6304         ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
6305
6306         if (count == 0)
6307                 return 0;
6308
6309         ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
6310         /*
6311          * Check to see if we are freeing blocks across a group
6312          * boundary.
6313          */
6314         if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) {
6315                 ext4_warning(sb, "too many blocks added to group %u",
6316                              block_group);
6317                 err = -EINVAL;
6318                 goto error_return;
6319         }
6320
6321         bitmap_bh = ext4_read_block_bitmap(sb, block_group);
6322         if (IS_ERR(bitmap_bh)) {
6323                 err = PTR_ERR(bitmap_bh);
6324                 bitmap_bh = NULL;
6325                 goto error_return;
6326         }
6327
6328         desc = ext4_get_group_desc(sb, block_group, &gd_bh);
6329         if (!desc) {
6330                 err = -EIO;
6331                 goto error_return;
6332         }
6333
6334         if (!ext4_sb_block_valid(sb, NULL, block, count)) {
6335                 ext4_error(sb, "Adding blocks in system zones - "
6336                            "Block = %llu, count = %lu",
6337                            block, count);
6338                 err = -EINVAL;
6339                 goto error_return;
6340         }
6341
6342         BUFFER_TRACE(bitmap_bh, "getting write access");
6343         err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
6344                                             EXT4_JTR_NONE);
6345         if (err)
6346                 goto error_return;
6347
6348         /*
6349          * We are about to modify some metadata.  Call the journal APIs
6350          * to unshare ->b_data if a currently-committing transaction is
6351          * using it
6352          */
6353         BUFFER_TRACE(gd_bh, "get_write_access");
6354         err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE);
6355         if (err)
6356                 goto error_return;
6357
6358         for (i = 0, clusters_freed = 0; i < cluster_count; i++) {
6359                 BUFFER_TRACE(bitmap_bh, "clear bit");
6360                 if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
6361                         ext4_error(sb, "bit already cleared for block %llu",
6362                                    (ext4_fsblk_t)(block + i));
6363                         BUFFER_TRACE(bitmap_bh, "bit already cleared");
6364                 } else {
6365                         clusters_freed++;
6366                 }
6367         }
6368
6369         err = ext4_mb_load_buddy(sb, block_group, &e4b);
6370         if (err)
6371                 goto error_return;
6372
6373         /*
6374          * need to update group_info->bb_free and bitmap
6375          * with the group lock held. generate_buddy looks at
6376          * them with the group lock held
6377          */
6378         ext4_lock_group(sb, block_group);
6379         mb_clear_bits(bitmap_bh->b_data, bit, cluster_count);
6380         mb_free_blocks(NULL, &e4b, bit, cluster_count);
6381         free_clusters_count = clusters_freed +
6382                 ext4_free_group_clusters(sb, desc);
6383         ext4_free_group_clusters_set(sb, desc, free_clusters_count);
6384         ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh);
6385         ext4_group_desc_csum_set(sb, block_group, desc);
6386         ext4_unlock_group(sb, block_group);
6387         percpu_counter_add(&sbi->s_freeclusters_counter,
6388                            clusters_freed);
6389
6390         if (sbi->s_log_groups_per_flex) {
6391                 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
6392                 atomic64_add(clusters_freed,
6393                              &sbi_array_rcu_deref(sbi, s_flex_groups,
6394                                                   flex_group)->free_clusters);
6395         }
6396
6397         ext4_mb_unload_buddy(&e4b);
6398
6399         /* We dirtied the bitmap block */
6400         BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
6401         err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
6402
6403         /* And the group descriptor block */
6404         BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
6405         ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
6406         if (!err)
6407                 err = ret;
6408
6409 error_return:
6410         brelse(bitmap_bh);
6411         ext4_std_error(sb, err);
6412         return err;
6413 }
6414
6415 /**
6416  * ext4_trim_extent -- function to TRIM one single free extent in the group
6417  * @sb:         super block for the file system
6418  * @start:      starting block of the free extent in the alloc. group
6419  * @count:      number of blocks to TRIM
6420  * @e4b:        ext4 buddy for the group
6421  *
6422  * Trim "count" blocks starting at "start" in the "group". To ensure that no
6423  * one will allocate those blocks, mark them as used in the buddy bitmap. This
6424  * must be called under the group lock.
6425  */
6426 static int ext4_trim_extent(struct super_block *sb,
6427                 int start, int count, struct ext4_buddy *e4b)
6428 __releases(bitlock)
6429 __acquires(bitlock)
6430 {
6431         struct ext4_free_extent ex;
6432         ext4_group_t group = e4b->bd_group;
6433         int ret = 0;
6434
6435         trace_ext4_trim_extent(sb, group, start, count);
6436
6437         assert_spin_locked(ext4_group_lock_ptr(sb, group));
6438
6439         ex.fe_start = start;
6440         ex.fe_group = group;
6441         ex.fe_len = count;
6442
6443         /*
6444          * Mark blocks used, so no one can reuse them while
6445          * being trimmed.
6446          */
6447         mb_mark_used(e4b, &ex);
6448         ext4_unlock_group(sb, group);
6449         ret = ext4_issue_discard(sb, group, start, count, NULL);
6450         ext4_lock_group(sb, group);
6451         mb_free_blocks(NULL, e4b, start, ex.fe_len);
6452         return ret;
6453 }
6454
6455 static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb,
6456                                            ext4_group_t grp)
6457 {
6458         unsigned long nr_clusters_in_group;
6459
6460         if (grp < (ext4_get_groups_count(sb) - 1))
6461                 nr_clusters_in_group = EXT4_CLUSTERS_PER_GROUP(sb);
6462         else
6463                 nr_clusters_in_group = (ext4_blocks_count(EXT4_SB(sb)->s_es) -
6464                                         ext4_group_first_block_no(sb, grp))
6465                                        >> EXT4_CLUSTER_BITS(sb);
6466
6467         return nr_clusters_in_group - 1;
6468 }
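
/*
 * Worked example (geometry assumed): with 32768 blocks per group, a
 * 100000-block filesystem, s_first_data_block == 0 and one block per
 * cluster, the last group starts at block 98304 and holds
 * 100000 - 98304 = 1696 clusters, so its last cluster index is 1695.
 */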
6469
6470 static bool ext4_trim_interrupted(void)
6471 {
6472         return fatal_signal_pending(current) || freezing(current);
6473 }
6474
6475 static int ext4_try_to_trim_range(struct super_block *sb,
6476                 struct ext4_buddy *e4b, ext4_grpblk_t start,
6477                 ext4_grpblk_t max, ext4_grpblk_t minblocks)
6478 __acquires(ext4_group_lock_ptr(sb, e4b->bd_group))
6479 __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
6480 {
6481         ext4_grpblk_t next, count, free_count, last, origin_start;
6482         bool set_trimmed = false;
6483         void *bitmap;
6484
6485         last = ext4_last_grp_cluster(sb, e4b->bd_group);
6486         bitmap = e4b->bd_bitmap;
6487         if (start == 0 && max >= last)
6488                 set_trimmed = true;
6489         origin_start = start;
6490         start = max(e4b->bd_info->bb_first_free, start);
6491         count = 0;
6492         free_count = 0;
6493
6494         while (start <= max) {
6495                 start = mb_find_next_zero_bit(bitmap, max + 1, start);
6496                 if (start > max)
6497                         break;
6498
6499                 next = mb_find_next_bit(bitmap, last + 1, start);
6500                 if (origin_start == 0 && next >= last)
6501                         set_trimmed = true;
6502
6503                 if ((next - start) >= minblocks) {
6504                         int ret = ext4_trim_extent(sb, start, next - start, e4b);
6505
6506                         if (ret && ret != -EOPNOTSUPP)
6507                                 return count;
6508                         count += next - start;
6509                 }
6510                 free_count += next - start;
6511                 start = next + 1;
6512
6513                 if (ext4_trim_interrupted())
6514                         return count;
6515
6516                 if (need_resched()) {
6517                         ext4_unlock_group(sb, e4b->bd_group);
6518                         cond_resched();
6519                         ext4_lock_group(sb, e4b->bd_group);
6520                 }
6521
6522                 if ((e4b->bd_info->bb_free - free_count) < minblocks)
6523                         break;
6524         }
6525
6526         if (set_trimmed)
6527                 EXT4_MB_GRP_SET_TRIMMED(e4b->bd_info);
6528
6529         return count;
6530 }
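
/*
 * Worked example (bitmap assumed): with minblocks == 4 and free runs of
 * 2, 5, 3 and 8 clusters within [start, max], only the 5- and 8-cluster
 * runs are discarded, so the function returns count == 13 while
 * free_count advances by all 18 free clusters walked past.
 */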
6531
6532 /**
6533  * ext4_trim_all_free -- function to trim all free space in alloc. group
6534  * @sb:                 super block for file system
6535  * @group:              group to be trimmed
6536  * @start:              first group block to examine
6537  * @max:                last group block to examine
6538  * @minblocks:          minimum extent block count
6539  *
6540  * ext4_trim_all_free walks through group's block bitmap searching for free
6541  * extents. When the free extent is found, mark it as used in group buddy
6542  * bitmap. Then issue a TRIM command on this extent and free the extent in
6543  * the group buddy bitmap.
6544  */
6545 static ext4_grpblk_t
6546 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
6547                    ext4_grpblk_t start, ext4_grpblk_t max,
6548                    ext4_grpblk_t minblocks)
6549 {
6550         struct ext4_buddy e4b;
6551         int ret;
6552
6553         trace_ext4_trim_all_free(sb, group, start, max);
6554
6555         ret = ext4_mb_load_buddy(sb, group, &e4b);
6556         if (ret) {
6557                 ext4_warning(sb, "Error %d loading buddy information for %u",
6558                              ret, group);
6559                 return ret;
6560         }
6561
6562         ext4_lock_group(sb, group);
6563
6564         if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
6565             minblocks < EXT4_SB(sb)->s_last_trim_minblks)
6566                 ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
6567         else
6568                 ret = 0;
6569
6570         ext4_unlock_group(sb, group);
6571         ext4_mb_unload_buddy(&e4b);
6572
6573         ext4_debug("trimmed %d blocks in the group %d\n",
6574                 ret, group);
6575
6576         return ret;
6577 }
6578
6579 /**
6580  * ext4_trim_fs() -- trim ioctl handle function
6581  * @sb:                 superblock for filesystem
6582  * @range:              fstrim_range structure
6583  *
6584  * start:       first byte to trim
6585  * len:         number of bytes to trim from start
6586  * minlen:      minimum extent length in bytes
6587  * ext4_trim_fs goes through all allocation groups containing bytes from
6588  * start to start+len. For each such group the ext4_trim_all_free function
6589  * is invoked to trim all free space.
6590  */
6591 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
6592 {
6593         unsigned int discard_granularity = bdev_discard_granularity(sb->s_bdev);
6594         struct ext4_group_info *grp;
6595         ext4_group_t group, first_group, last_group;
6596         ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
6597         uint64_t start, end, minlen, trimmed = 0;
6598         ext4_fsblk_t first_data_blk =
6599                         le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
6600         ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
6601         int ret = 0;
6602
6603         start = range->start >> sb->s_blocksize_bits;
6604         end = start + (range->len >> sb->s_blocksize_bits) - 1;
6605         minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6606                               range->minlen >> sb->s_blocksize_bits);
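
        /*
         * Worked example (4KiB blocks and one block per cluster assumed):
         * range->minlen == 1 MiB becomes 1048576 >> 12 = 256 blocks, so
         * minlen == 256 clusters; start and end are converted from byte
         * offsets to block numbers the same way.
         */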
6607
6608         if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
6609             start >= max_blks ||
6610             range->len < sb->s_blocksize)
6611                 return -EINVAL;
6612         /* No point in trying to trim less than the discard granularity */
6613         if (range->minlen < discard_granularity) {
6614                 minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6615                                 discard_granularity >> sb->s_blocksize_bits);
6616                 if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
6617                         goto out;
6618         }
6619         if (end >= max_blks - 1)
6620                 end = max_blks - 1;
6621         if (end <= first_data_blk)
6622                 goto out;
6623         if (start < first_data_blk)
6624                 start = first_data_blk;
6625
6626         /* Determine first and last group to examine based on start and end */
6627         ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
6628                                      &first_group, &first_cluster);
6629         ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
6630                                      &last_group, &last_cluster);
6631
6632         /* end now represents the last cluster to discard in this group */
6633         end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6634
6635         for (group = first_group; group <= last_group; group++) {
6636                 if (ext4_trim_interrupted())
6637                         break;
6638                 grp = ext4_get_group_info(sb, group);
6639                 if (!grp)
6640                         continue;
6641                 /* We only do this if the grp has never been initialized */
6642                 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
6643                         ret = ext4_mb_init_group(sb, group, GFP_NOFS);
6644                         if (ret)
6645                                 break;
6646                 }
6647
6648                 /*
6649                  * For all the groups except the last one, last cluster will
6650                  * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
6651                  * change it for the last group; note that last_cluster was
6652                  * already computed earlier by ext4_get_group_no_and_offset()
6653                  */
6654                 if (group == last_group)
6655                         end = last_cluster;
6656                 if (grp->bb_free >= minlen) {
6657                         cnt = ext4_trim_all_free(sb, group, first_cluster,
6658                                                  end, minlen);
6659                         if (cnt < 0) {
6660                                 ret = cnt;
6661                                 break;
6662                         }
6663                         trimmed += cnt;
6664                 }
6665
6666                 /*
6667                  * For every group except the first one, we are sure
6668                  * that the first cluster to discard will be cluster #0.
6669                  */
6670                 first_cluster = 0;
6671         }
6672
6673         if (!ret)
6674                 EXT4_SB(sb)->s_last_trim_minblks = minlen;
6675
6676 out:
6677         range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
6678         return ret;
6679 }
6680
6681 /* Iterate all the free extents in the group. */
6682 int
6683 ext4_mballoc_query_range(
6684         struct super_block              *sb,
6685         ext4_group_t                    group,
6686         ext4_grpblk_t                   start,
6687         ext4_grpblk_t                   end,
6688         ext4_mballoc_query_range_fn     formatter,
6689         void                            *priv)
6690 {
6691         void                            *bitmap;
6692         ext4_grpblk_t                   next;
6693         struct ext4_buddy               e4b;
6694         int                             error;
6695
6696         error = ext4_mb_load_buddy(sb, group, &e4b);
6697         if (error)
6698                 return error;
6699         bitmap = e4b.bd_bitmap;
6700
6701         ext4_lock_group(sb, group);
6702
6703         start = max(e4b.bd_info->bb_first_free, start);
6704         if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
6705                 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6706
6707         while (start <= end) {
6708                 start = mb_find_next_zero_bit(bitmap, end + 1, start);
6709                 if (start > end)
6710                         break;
6711                 next = mb_find_next_bit(bitmap, end + 1, start);
6712
6713                 ext4_unlock_group(sb, group);
6714                 error = formatter(sb, group, start, next - start, priv);
6715                 if (error)
6716                         goto out_unload;
6717                 ext4_lock_group(sb, group);
6718
6719                 start = next + 1;
6720         }
6721
6722         ext4_unlock_group(sb, group);
6723 out_unload:
6724         ext4_mb_unload_buddy(&e4b);
6725
6726         return error;
6727 }
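
/*
 * Example callback (illustrative sketch only, not part of this file):
 * a minimal ext4_mballoc_query_range_fn that tallies free clusters via
 * a caller-defined struct:
 *
 *      struct free_tally { ext4_grpblk_t clusters; };
 *
 *      static int tally_free(struct super_block *sb, ext4_group_t group,
 *                            ext4_grpblk_t start, ext4_grpblk_t len,
 *                            void *priv)
 *      {
 *              struct free_tally *t = priv;
 *
 *              t->clusters += len;
 *              return 0;
 *      }
 *
 * A non-zero return from the formatter aborts the walk and is propagated
 * to the caller of ext4_mballoc_query_range().
 */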