// SPDX-License-Identifier: GPL-2.0
/*
 *  fs/ext4/extents_status.c
 *
 * Written by Yongqiang Yang <xiaoqiangnk@gmail.com>
 * Modified by
 *	Allison Henderson <achender@linux.vnet.ibm.com>
 *	Hugh Dickins <hughd@google.com>
 *	Zheng Liu <wenqing.lz@taobao.com>
 *
 * Ext4 extents status tree core functions.
 */
#include <linux/list_sort.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "ext4.h"

#include <trace/events/ext4.h>

/*
 * According to previous discussion at the Ext4 Developer Workshop, we
 * will introduce a new structure called the io tree to track all extent
 * status in order to solve some problems that we have met
 * (e.g. the reservation space warning) and to provide extent-level locking.
 * The delay extent tree is the first step to achieve this goal. It was
 * originally built by Yongqiang Yang. At that time its goal was only to
 * track delayed extents in memory to simplify the implementation of fiemap
 * and bigalloc, and to introduce lseek SEEK_DATA/SEEK_HOLE support. That
 * is why it is still called the delay extent tree in the first commit. But
 * to better convey what it does, it has since been renamed the extent
 * status tree.
 *
 * Currently the first step has been done. All delayed extents are
 * tracked in the tree. The tree records a delayed extent when a delayed
 * allocation is issued, and drops it when the extent is written out or
 * invalidated. Therefore the implementations of fiemap and bigalloc
 * are simplified, and SEEK_DATA/SEEK_HOLE are introduced.
 *
 * The following comment describes the implementation of the extent
 * status tree and future works.
 *
 * In this step all extent status is tracked by the extent status tree.
 * Thus, we can first try to look up a block mapping in this tree before
 * searching the extent tree, and the single extent cache can be removed
 * because the extent status tree does a better job. Extents in the status
 * tree are loaded on demand, so the extent status tree may not contain
 * all of the extents in a file. Meanwhile we define a shrinker to reclaim
 * memory from the extent status tree because a fragmented extent tree
 * makes the status tree cost too much memory. Written/unwritten/hole
 * extents in the tree will be reclaimed by this shrinker when we are
 * under high memory pressure. Delayed extents will not be reclaimed
 * because fiemap, bigalloc, and seek_data/hole need them.
 */

/*
 * Extent status tree implementation for ext4.
 *
 *
 * ==========================================================================
 * Extent status tree tracks all extent status.
 *
 * 1. Why do we need to implement an extent status tree?
 *
 * Without the extent status tree, ext4 identifies a delayed extent by
 * looking up the page cache, which has several deficiencies: complicated,
 * buggy, and inefficient code.
 *
 * FIEMAP, SEEK_HOLE/DATA, bigalloc, and writeout all need to know if a
 * block or a range of blocks belongs to a delayed extent.
 *
 * Let us have a look at how they work without the extent status tree.
 *
 * -- FIEMAP
 * FIEMAP looks up the page cache to identify delayed allocations from
 * holes.
 *
 * -- SEEK_HOLE/DATA
 * SEEK_HOLE/DATA has the same problem as FIEMAP.
 *
 * -- bigalloc
 * bigalloc looks up the page cache to figure out if a block is
 * already under delayed allocation or not in order to determine whether
 * quota reserving is needed for the cluster.
 *
 * -- writeout
 * Writeout looks up the whole page cache to see if a buffer is
 * mapped. If there are not very many delayed buffers, then it is
 * time consuming.
 *
 * With the extent status tree implementation, FIEMAP, SEEK_HOLE/DATA,
 * bigalloc and writeout can figure out if a block or a range of
 * blocks is under delayed allocation (i.e. belongs to a delayed extent)
 * or not by searching the extent status tree.
 *
 *
 * ==========================================================================
 * 2. Ext4 extent status tree implementation
 *
 * -- extent
 * An extent is a range of blocks which are contiguous logically and
 * physically. Unlike an extent in the extent tree, this extent is an
 * in-memory struct; there is no corresponding on-disk data. There
 * is no limit on the length of an extent, so an extent can contain as
 * many blocks as are contiguous logically and physically.
 *
 * -- extent status tree
 * Every inode has an extent status tree and all allocation blocks
 * are added to the tree with different status. The extents in the
 * tree are ordered by logical block number.
 *
 * -- operations on an extent status tree
 * There are three important operations on an extent status tree: finding
 * the next extent, adding an extent (a range of blocks), and removing an
 * extent.
 *
 * -- race on an extent status tree
 * The extent status tree is protected by inode->i_es_lock.
 *
 * -- memory consumption
 * A fragmented extent tree will make the extent status tree cost too much
 * memory. Hence, we will reclaim written/unwritten/hole extents from the
 * tree under heavy memory pressure.
 *
 *
 * ==========================================================================
 * 3. Performance analysis
 *
 * -- overhead
 * 1. There is a cached extent for write access, so if writes are
 * not very random, adding-space operations are in O(1) time.
 *
 * -- gain
 * 2. Code is much simpler, more readable, more maintainable and
 * extensible.
 *
 *
 * ==========================================================================
 * 4. TODO list
 *
 * -- Refactor delayed space reservation
 *
 * -- Extent-level locking
 */
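
/*
 * Illustrative sketch (not compiled): how a typical caller uses this API.
 * A block-mapping path first consults the status tree and only reads the
 * on-disk extent tree on a miss:
 *
 *	struct extent_status es;
 *
 *	if (ext4_es_lookup_extent(inode, lblk, NULL, &es)) {
 *		// hit: es describes the written/unwritten/delayed/hole
 *		// extent containing lblk
 *	} else {
 *		// miss: consult the on-disk extent tree, then publish
 *		// the result, e.g.:
 *		// ext4_es_insert_extent(inode, lblk, len, pblk,
 *		//			 EXTENT_STATUS_WRITTEN);
 *	}
 */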

static struct kmem_cache *ext4_es_cachep;
static struct kmem_cache *ext4_pending_cachep;

static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end, int *reserved);
static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
		       struct ext4_inode_info *locked_ei);
static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
			     ext4_lblk_t len);

int __init ext4_init_es(void)
{
	ext4_es_cachep = kmem_cache_create("ext4_extent_status",
					   sizeof(struct extent_status),
					   0, (SLAB_RECLAIM_ACCOUNT), NULL);
	if (ext4_es_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_es(void)
{
	kmem_cache_destroy(ext4_es_cachep);
}

void ext4_es_init_tree(struct ext4_es_tree *tree)
{
	tree->root = RB_ROOT;
	tree->cache_es = NULL;
}

#ifdef ES_DEBUG__
static void ext4_es_print_tree(struct inode *inode)
{
	struct ext4_es_tree *tree;
	struct rb_node *node;

	printk(KERN_DEBUG "status extents for inode %lu:", inode->i_ino);
	tree = &EXT4_I(inode)->i_es_tree;
	node = rb_first(&tree->root);
	while (node) {
		struct extent_status *es;
		es = rb_entry(node, struct extent_status, rb_node);
		printk(KERN_DEBUG " [%u/%u) %llu %x",
		       es->es_lblk, es->es_len,
		       ext4_es_pblock(es), ext4_es_status(es));
		node = rb_next(node);
	}
	printk(KERN_DEBUG "\n");
}
#else
#define ext4_es_print_tree(inode)
#endif

static inline ext4_lblk_t ext4_es_end(struct extent_status *es)
{
	BUG_ON(es->es_lblk + es->es_len < es->es_lblk);
	return es->es_lblk + es->es_len - 1;
}

/*
 * Search through the tree for a delayed extent with a given offset.  If
 * it can't be found, try to find the next extent.
 */
static struct extent_status *__es_tree_search(struct rb_root *root,
					      ext4_lblk_t lblk)
{
	struct rb_node *node = root->rb_node;
	struct extent_status *es = NULL;

	while (node) {
		es = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es))
			node = node->rb_right;
		else
			return es;
	}

	if (es && lblk < es->es_lblk)
		return es;

	if (es && lblk > ext4_es_end(es)) {
		node = rb_next(&es->rb_node);
		return node ? rb_entry(node, struct extent_status, rb_node) :
			      NULL;
	}

	return NULL;
}
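
/*
 * Illustrative example (not compiled): with extents [0/4) and [10/5) in
 * the tree, __es_tree_search() behaves as follows:
 *
 *	__es_tree_search(root, 2);	// returns [0/4), which contains block 2
 *	__es_tree_search(root, 6);	// returns [10/5), the next extent
 *	__es_tree_search(root, 20);	// returns NULL, nothing follows
 */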

/*
 * ext4_es_find_extent_range - find extent with specified status within block
 *                             range or next extent following block range in
 *                             extents status tree
 *
 * @inode - file containing the range
 * @matching_fn - pointer to function that matches extents with desired status
 * @lblk - logical block defining start of range
 * @end - logical block defining end of range
 * @es - extent found, if any
 *
 * Find the first extent within the block range specified by @lblk and @end
 * in the extents status tree that satisfies @matching_fn.  If a match
 * is found, it's returned in @es.  If not, and a matching extent is found
 * beyond the block range, it's returned in @es.  If no match is found, an
 * extent is returned in @es whose es_lblk, es_len, and es_pblk components
 * are 0.
 */
static void __es_find_extent_range(struct inode *inode,
				   int (*matching_fn)(struct extent_status *es),
				   ext4_lblk_t lblk, ext4_lblk_t end,
				   struct extent_status *es)
{
	struct ext4_es_tree *tree = NULL;
	struct extent_status *es1 = NULL;
	struct rb_node *node;

	WARN_ON(es == NULL);
	WARN_ON(end < lblk);

	tree = &EXT4_I(inode)->i_es_tree;

	/* see if the extent has been cached */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	es1 = READ_ONCE(tree->cache_es);
	if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) {
		es_debug("%u cached by [%u/%u) %llu %x\n",
			 lblk, es1->es_lblk, es1->es_len,
			 ext4_es_pblock(es1), ext4_es_status(es1));
		goto out;
	}

	es1 = __es_tree_search(&tree->root, lblk);

out:
	if (es1 && !matching_fn(es1)) {
		while ((node = rb_next(&es1->rb_node)) != NULL) {
			es1 = rb_entry(node, struct extent_status, rb_node);
			if (es1->es_lblk > end) {
				es1 = NULL;
				break;
			}
			if (matching_fn(es1))
				break;
		}
	}

	if (es1 && matching_fn(es1)) {
		WRITE_ONCE(tree->cache_es, es1);
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
	}
}

/*
 * Locking for __es_find_extent_range() for external use
 */
void ext4_es_find_extent_range(struct inode *inode,
			       int (*matching_fn)(struct extent_status *es),
			       ext4_lblk_t lblk, ext4_lblk_t end,
			       struct extent_status *es)
{
	trace_ext4_es_find_extent_range_enter(inode, lblk);

	read_lock(&EXT4_I(inode)->i_es_lock);
	__es_find_extent_range(inode, matching_fn, lblk, end, es);
	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_find_extent_range_exit(inode, es);
}
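
/*
 * Illustrative usage sketch (not compiled): SEEK_DATA-style code can use
 * this helper with one of the ext4_es_is_*() predicates from
 * extents_status.h to find the next delayed extent at or after @lblk:
 *
 *	struct extent_status es;
 *
 *	ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
 *				  lblk, EXT_MAX_BLOCKS - 1, &es);
 *	if (es.es_len == 0)
 *		return -ENXIO;	// no delayed extent found
 */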

/*
 * __es_scan_range - search block range for block with specified status
 *                   in extents status tree
 *
 * @inode - file containing the range
 * @matching_fn - pointer to function that matches extents with desired status
 * @lblk - logical block defining start of range
 * @end - logical block defining end of range
 *
 * Returns true if at least one block in the specified block range satisfies
 * the criterion specified by @matching_fn, and false if not.  If at least
 * one extent has the specified status, then there is at least one block
 * in the cluster with that status.  Should only be called by code that has
 * taken i_es_lock.
 */
static bool __es_scan_range(struct inode *inode,
			    int (*matching_fn)(struct extent_status *es),
			    ext4_lblk_t start, ext4_lblk_t end)
{
	struct extent_status es;

	__es_find_extent_range(inode, matching_fn, start, end, &es);
	if (es.es_len == 0)
		return false;   /* no matching extent in the tree */
	else if (es.es_lblk <= start &&
		 start < es.es_lblk + es.es_len)
		return true;
	else if (start <= es.es_lblk && es.es_lblk <= end)
		return true;
	else
		return false;
}

/*
 * Locking for __es_scan_range() for external use
 */
bool ext4_es_scan_range(struct inode *inode,
			int (*matching_fn)(struct extent_status *es),
			ext4_lblk_t lblk, ext4_lblk_t end)
{
	bool ret;

	read_lock(&EXT4_I(inode)->i_es_lock);
	ret = __es_scan_range(inode, matching_fn, lblk, end);
	read_unlock(&EXT4_I(inode)->i_es_lock);

	return ret;
}

/*
 * __es_scan_clu - search cluster for block with specified status in
 *                 extents status tree
 *
 * @inode - file containing the cluster
 * @matching_fn - pointer to function that matches extents with desired status
 * @lblk - logical block in cluster to be searched
 *
 * Returns true if at least one extent in the cluster containing @lblk
 * satisfies the criterion specified by @matching_fn, and false if not.  If at
 * least one extent has the specified status, then there is at least one block
 * in the cluster with that status.  Should only be called by code that has
 * taken i_es_lock.
 */
static bool __es_scan_clu(struct inode *inode,
			  int (*matching_fn)(struct extent_status *es),
			  ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t lblk_start, lblk_end;

	lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
	lblk_end = lblk_start + sbi->s_cluster_ratio - 1;

	return __es_scan_range(inode, matching_fn, lblk_start, lblk_end);
}

/*
 * Locking for __es_scan_clu() for external use
 */
bool ext4_es_scan_clu(struct inode *inode,
		      int (*matching_fn)(struct extent_status *es),
		      ext4_lblk_t lblk)
{
	bool ret;

	read_lock(&EXT4_I(inode)->i_es_lock);
	ret = __es_scan_clu(inode, matching_fn, lblk);
	read_unlock(&EXT4_I(inode)->i_es_lock);

	return ret;
}
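
/*
 * Worked example (illustrative): on a bigalloc filesystem with
 * s_cluster_ratio == 16, a lookup for lblk 35 scans the whole cluster
 * containing it:
 *
 *	lblk_start = EXT4_LBLK_CMASK(sbi, 35);	// 32
 *	lblk_end   = 32 + 16 - 1;		// 47
 *
 * so __es_scan_clu(inode, fn, 35) is equivalent to
 * __es_scan_range(inode, fn, 32, 47).
 */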

static void ext4_es_list_add(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	if (!list_empty(&ei->i_es_list))
		return;

	spin_lock(&sbi->s_es_lock);
	if (list_empty(&ei->i_es_list)) {
		list_add_tail(&ei->i_es_list, &sbi->s_es_list);
		sbi->s_es_nr_inode++;
	}
	spin_unlock(&sbi->s_es_lock);
}

static void ext4_es_list_del(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	spin_lock(&sbi->s_es_lock);
	if (!list_empty(&ei->i_es_list)) {
		list_del_init(&ei->i_es_list);
		sbi->s_es_nr_inode--;
		WARN_ON_ONCE(sbi->s_es_nr_inode < 0);
	}
	spin_unlock(&sbi->s_es_lock);
}

static struct extent_status *
ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
		     ext4_fsblk_t pblk)
{
	struct extent_status *es;
	es = kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
	if (es == NULL)
		return NULL;
	es->es_lblk = lblk;
	es->es_len = len;
	es->es_pblk = pblk;

	/*
	 * We don't count delayed extents because we never try to reclaim them
	 */
	if (!ext4_es_is_delayed(es)) {
		if (!EXT4_I(inode)->i_es_shk_nr++)
			ext4_es_list_add(inode);
		percpu_counter_inc(&EXT4_SB(inode->i_sb)->
					s_es_stats.es_stats_shk_cnt);
	}

	EXT4_I(inode)->i_es_all_nr++;
	percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);

	return es;
}

static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
{
	EXT4_I(inode)->i_es_all_nr--;
	percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);

	/* Decrease the shrink counter when this es is not delayed */
	if (!ext4_es_is_delayed(es)) {
		BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0);
		if (!--EXT4_I(inode)->i_es_shk_nr)
			ext4_es_list_del(inode);
		percpu_counter_dec(&EXT4_SB(inode->i_sb)->
					s_es_stats.es_stats_shk_cnt);
	}

	kmem_cache_free(ext4_es_cachep, es);
}

/*
 * Check whether or not two extents can be merged
 * Condition:
 *  - logical block number is contiguous
 *  - physical block number is contiguous
 *  - status is equal
 */
static int ext4_es_can_be_merged(struct extent_status *es1,
				 struct extent_status *es2)
{
	if (ext4_es_type(es1) != ext4_es_type(es2))
		return 0;

	if (((__u64) es1->es_len) + es2->es_len > EXT_MAX_BLOCKS) {
		pr_warn("ES assertion failed when merging extents. "
			"The sum of lengths of es1 (%d) and es2 (%d) "
			"is bigger than allowed file size (%d)\n",
			es1->es_len, es2->es_len, EXT_MAX_BLOCKS);
		WARN_ON(1);
		return 0;
	}

	if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk)
		return 0;

	if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) &&
	    (ext4_es_pblock(es1) + es1->es_len == ext4_es_pblock(es2)))
		return 1;

	if (ext4_es_is_hole(es1))
		return 1;

	/* we need to check that a delayed extent is without unwritten status */
	if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1))
		return 1;

	return 0;
}
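
/*
 * Worked example (illustrative): a written extent [0/8) at pblk 100 and
 * a written extent [8/4) at pblk 108 satisfy all three conditions
 * (0 + 8 == 8, 100 + 8 == 108, same status), so they merge into a single
 * written extent [0/12) at pblk 100. If the second extent sat at pblk 200
 * instead, the merge would be refused.
 */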

static struct extent_status *
ext4_es_try_to_merge_left(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_prev(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es1, es)) {
		es1->es_len += es->es_len;
		if (ext4_es_is_referenced(es))
			ext4_es_set_referenced(es1);
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		es = es1;
	}

	return es;
}

static struct extent_status *
ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_next(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es, es1)) {
		es->es_len += es1->es_len;
		if (ext4_es_is_referenced(es1))
			ext4_es_set_referenced(es);
		rb_erase(node, &tree->root);
		ext4_es_free_extent(inode, es1);
	}

	return es;
}

#ifdef ES_AGGRESSIVE_TEST
#include "ext4_extents.h"	/* Needed when ES_AGGRESSIVE_TEST is defined */

static void ext4_es_insert_extent_ext_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t ee_block;
	ext4_fsblk_t ee_start;
	unsigned short ee_len;
	int depth, ee_status, es_status;

	path = ext4_find_extent(inode, es->es_lblk, NULL, EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;

	if (ex) {
		ee_block = le32_to_cpu(ex->ee_block);
		ee_start = ext4_ext_pblock(ex);
		ee_len = ext4_ext_get_actual_len(ex);

		ee_status = ext4_ext_is_unwritten(ex) ? 1 : 0;
		es_status = ext4_es_is_unwritten(es) ? 1 : 0;

		/*
		 * Make sure ex and es do not overlap when we try to insert
		 * a delayed/hole extent.
		 */
		if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) {
			if (in_range(es->es_lblk, ee_block, ee_len)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu we can find an extent "
					"at block [%d/%d/%llu/%c], but we "
					"want to add a delayed/hole extent "
					"[%d/%d/%llu/%x]\n",
					inode->i_ino, ee_block, ee_len,
					ee_start, ee_status ? 'u' : 'w',
					es->es_lblk, es->es_len,
					ext4_es_pblock(es), ext4_es_status(es));
			}
			goto out;
		}

		/*
		 * We don't check ee_block == es->es_lblk, etc. because es
		 * might be a part of a whole extent, or vice versa.
		 */
		if (es->es_lblk < ee_block ||
		    ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
			goto out;
		}

		if (ee_status ^ es_status) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
		}
	} else {
		/*
		 * We can't find an extent on disk.  So we need to make sure
		 * that we don't want to add a written/unwritten extent.
		 */
		if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"can't find an extent at block %d but we want "
				"to add a written/unwritten extent "
				"[%d/%d/%llu/%x]\n", inode->i_ino,
				es->es_lblk, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
		}
	}
out:
	ext4_ext_drop_refs(path);
	kfree(path);
}

static void ext4_es_insert_extent_ind_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_map_blocks map;
	int retval;

	/*
	 * Here we call ext4_ind_map_blocks to look up a block mapping because
	 * the 'Indirect' structure is defined in indirect.c, so we can't
	 * access the direct/indirect tree from outside, and it would be too
	 * ugly to define this function in indirect.c.
	 */

	map.m_lblk = es->es_lblk;
	map.m_len = es->es_len;

	retval = ext4_ind_map_blocks(NULL, inode, &map, 0);
	if (retval > 0) {
		if (ext4_es_is_delayed(es) || ext4_es_is_hole(es)) {
			/*
			 * We want to add a delayed/hole extent but this
			 * block has been allocated.
			 */
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can find blocks but we want to add a "
				"delayed/hole extent [%d/%d/%llu/%x]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		} else if (ext4_es_is_written(es)) {
			if (retval != es->es_len) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu retval %d != es_len %d\n",
					inode->i_ino, retval, es->es_len);
				return;
			}
			if (map.m_pblk != ext4_es_pblock(es)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu m_pblk %llu != "
					"es_pblk %llu\n",
					inode->i_ino, map.m_pblk,
					ext4_es_pblock(es));
				return;
			}
		} else {
			/*
			 * We don't need to check unwritten extents because
			 * indirect-based files don't have them.
			 */
			BUG();
		}
	} else if (retval == 0) {
		if (ext4_es_is_written(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can't find the block but we want to add "
				"a written extent [%d/%d/%llu/%x]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
		}
	}
}

static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
	/*
	 * We don't need to worry about the race condition because the
	 * caller takes i_data_sem locking.
	 */
	BUG_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		ext4_es_insert_extent_ext_check(inode, es);
	else
		ext4_es_insert_extent_ind_check(inode, es);
}
#else
static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
}
#endif

static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node **p = &tree->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_status *es;

	while (*p) {
		parent = *p;
		es = rb_entry(parent, struct extent_status, rb_node);

		if (newes->es_lblk < es->es_lblk) {
			if (ext4_es_can_be_merged(newes, es)) {
				/*
				 * Here we can modify es_lblk directly
				 * because it isn't overlapped.
				 */
				es->es_lblk = newes->es_lblk;
				es->es_len += newes->es_len;
				if (ext4_es_is_written(es) ||
				    ext4_es_is_unwritten(es))
					ext4_es_store_pblock(es,
							     newes->es_pblk);
				es = ext4_es_try_to_merge_left(inode, es);
				goto out;
			}
			p = &(*p)->rb_left;
		} else if (newes->es_lblk > ext4_es_end(es)) {
			if (ext4_es_can_be_merged(es, newes)) {
				es->es_len += newes->es_len;
				es = ext4_es_try_to_merge_right(inode, es);
				goto out;
			}
			p = &(*p)->rb_right;
		} else {
			BUG();
			return -EINVAL;
		}
	}

	es = ext4_es_alloc_extent(inode, newes->es_lblk, newes->es_len,
				  newes->es_pblk);
	if (!es)
		return -ENOMEM;
	rb_link_node(&es->rb_node, parent, p);
	rb_insert_color(&es->rb_node, &tree->root);

out:
	tree->cache_es = es;
	return 0;
}

/*
 * ext4_es_insert_extent() adds information to an inode's extent
 * status tree.
 *
 * Return 0 on success, error code on failure.
 */
int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned int status)
{
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;
	int err = 0;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
		 lblk, len, pblk, status, inode->i_ino);

	if (!len)
		return 0;

	BUG_ON(end < lblk);

	if ((status & EXTENT_STATUS_DELAYED) &&
	    (status & EXTENT_STATUS_WRITTEN)) {
		ext4_warning(inode->i_sb, "Inserting extent [%u/%u] as "
				"delayed and written which can potentially "
				"cause data loss.", lblk, len);
		WARN_ON(1);
	}

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock_status(&newes, pblk, status);
	trace_ext4_es_insert_extent(inode, &newes);

	ext4_es_insert_extent_check(inode, &newes);

	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end, NULL);
	if (err != 0)
		goto error;
retry:
	err = __es_insert_extent(inode, &newes);
	if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
					  128, EXT4_I(inode)))
		goto retry;
	if (err == -ENOMEM && !ext4_es_is_delayed(&newes))
		err = 0;

	if (sbi->s_cluster_ratio > 1 && test_opt(inode->i_sb, DELALLOC) &&
	    (status & EXTENT_STATUS_WRITTEN ||
	     status & EXTENT_STATUS_UNWRITTEN))
		__revise_pending(inode, lblk, len);

error:
	write_unlock(&EXT4_I(inode)->i_es_lock);

	ext4_es_print_tree(inode);

	return err;
}
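
/*
 * Illustrative usage sketch (not compiled): after mapping blocks through
 * the on-disk extent tree, a hypothetical caller would publish the result
 * so later lookups hit the cache:
 *
 *	// map->m_lblk .. m_lblk + m_len - 1 is now mapped at map->m_pblk
 *	err = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
 *				    map->m_pblk, EXTENT_STATUS_WRITTEN);
 *
 * Note that a delayed+written status combination is rejected above with a
 * warning since it could cause data loss.
 */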

/*
 * ext4_es_cache_extent() inserts information into the extent status
 * tree if and only if there isn't information about the range in
 * question already.
 */
void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned int status)
{
	struct extent_status *es;
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock_status(&newes, pblk, status);
	trace_ext4_es_cache_extent(inode, &newes);

	if (!len)
		return;

	BUG_ON(end < lblk);

	write_lock(&EXT4_I(inode)->i_es_lock);

	es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk);
	if (!es || es->es_lblk > end)
		__es_insert_extent(inode, &newes);
	write_unlock(&EXT4_I(inode)->i_es_lock);
}

/*
 * ext4_es_lookup_extent() looks up an extent in the extent status tree.
 *
 * ext4_es_lookup_extent is called by ext4_map_blocks/ext4_da_map_blocks.
 *
 * Return: 1 if found, 0 if not
 */
int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t *next_lblk,
			  struct extent_status *es)
{
	struct ext4_es_tree *tree;
	struct ext4_es_stats *stats;
	struct extent_status *es1 = NULL;
	struct rb_node *node;
	int found = 0;

	trace_ext4_es_lookup_extent_enter(inode, lblk);
	es_debug("lookup extent in block %u\n", lblk);

	tree = &EXT4_I(inode)->i_es_tree;
	read_lock(&EXT4_I(inode)->i_es_lock);

	/* first see if the extent has been cached */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	es1 = READ_ONCE(tree->cache_es);
	if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) {
		es_debug("%u cached by [%u/%u)\n",
			 lblk, es1->es_lblk, es1->es_len);
		found = 1;
		goto out;
	}

	node = tree->root.rb_node;
	while (node) {
		es1 = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es1->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es1))
			node = node->rb_right;
		else {
			found = 1;
			break;
		}
	}

out:
	stats = &EXT4_SB(inode->i_sb)->s_es_stats;
	if (found) {
		BUG_ON(!es1);
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
		if (!ext4_es_is_referenced(es1))
			ext4_es_set_referenced(es1);
		percpu_counter_inc(&stats->es_stats_cache_hits);
		if (next_lblk) {
			node = rb_next(&es1->rb_node);
			if (node) {
				es1 = rb_entry(node, struct extent_status,
					       rb_node);
				*next_lblk = es1->es_lblk;
			} else
				*next_lblk = 0;
		}
	} else {
		percpu_counter_inc(&stats->es_stats_cache_misses);
	}

	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_lookup_extent_exit(inode, es, found);
	return found;
}
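
/*
 * Illustrative example (not compiled): a hypothetical caller deciding how
 * to treat a block based on the cached status:
 *
 *	struct extent_status es;
 *	ext4_lblk_t next;
 *
 *	if (ext4_es_lookup_extent(inode, lblk, &next, &es)) {
 *		if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es))
 *			pblk = ext4_es_pblock(&es) + lblk - es.es_lblk;
 *		else if (ext4_es_is_delayed(&es))
 *			; // allocation pending, no physical block yet
 *	}
 */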

struct rsvd_count {
	int ndelonly;
	bool first_do_lblk_found;
	ext4_lblk_t first_do_lblk;
	ext4_lblk_t last_do_lblk;
	struct extent_status *left_es;
	bool partial;
	ext4_lblk_t lclu;
};

/*
 * init_rsvd - initialize reserved count data before removing block range
 *	       in file from extent status tree
 *
 * @inode - file containing range
 * @lblk - first block in range
 * @es - pointer to first extent in range
 * @rc - pointer to reserved count data
 *
 * Assumes es is not NULL
 */
static void init_rsvd(struct inode *inode, ext4_lblk_t lblk,
		      struct extent_status *es, struct rsvd_count *rc)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct rb_node *node;

	rc->ndelonly = 0;

	/*
	 * for bigalloc, note the first delonly block in the range has not
	 * been found, record the extent containing the block to the left of
	 * the region to be removed, if any, and note that there's no partial
	 * cluster handling required yet
	 */
	if (sbi->s_cluster_ratio > 1) {
		rc->first_do_lblk_found = false;
		if (lblk > es->es_lblk) {
			rc->left_es = es;
		} else {
			node = rb_prev(&es->rb_node);
			rc->left_es = node ? rb_entry(node,
						      struct extent_status,
						      rb_node) : NULL;
		}
		rc->partial = false;
	}
}

/*
 * count_rsvd - count the clusters containing delayed and not unwritten
 *		(delonly) blocks in a range within an extent and add to
 *		the running tally in rsvd_count
 *
 * @inode - file containing extent
 * @lblk - first block in range
 * @len - length of range in blocks
 * @es - pointer to extent containing clusters to be counted
 * @rc - pointer to reserved count data
 *
 * Tracks partial clusters found at the beginning and end of extents so
 * they aren't overcounted when they span adjacent extents
 */
static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
		       struct extent_status *es, struct rsvd_count *rc)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t i, end, nclu;

	if (!ext4_es_is_delonly(es))
		return;

	WARN_ON(len <= 0);

	if (sbi->s_cluster_ratio == 1) {
		rc->ndelonly += (int) len;
		return;
	}

	/* bigalloc */

	i = (lblk < es->es_lblk) ? es->es_lblk : lblk;
	end = lblk + (ext4_lblk_t) len - 1;
	end = (end > ext4_es_end(es)) ? ext4_es_end(es) : end;

	/* record the first block of the first delonly extent seen */
	if (rc->first_do_lblk_found == false) {
		rc->first_do_lblk = i;
		rc->first_do_lblk_found = true;
	}

	/* update the last lblk in the region seen so far */
	rc->last_do_lblk = end;

	/*
	 * if we're tracking a partial cluster and the current extent
	 * doesn't start with it, count it and stop tracking
	 */
	if (rc->partial && (rc->lclu != EXT4_B2C(sbi, i))) {
		rc->ndelonly++;
		rc->partial = false;
	}

	/*
	 * if the first cluster doesn't start on a cluster boundary but
	 * ends on one, count it
	 */
	if (EXT4_LBLK_COFF(sbi, i) != 0) {
		if (end >= EXT4_LBLK_CFILL(sbi, i)) {
			rc->ndelonly++;
			rc->partial = false;
			i = EXT4_LBLK_CFILL(sbi, i) + 1;
		}
	}

	/*
	 * if the current cluster starts on a cluster boundary, count the
	 * number of whole delonly clusters in the extent
	 */
	if ((i + sbi->s_cluster_ratio - 1) <= end) {
		nclu = (end - i + 1) >> sbi->s_cluster_bits;
		rc->ndelonly += nclu;
		i += nclu << sbi->s_cluster_bits;
	}

	/*
	 * start tracking a partial cluster if there's a partial at the end
	 * of the current extent and we're not already tracking one
	 */
	if (!rc->partial && i <= end) {
		rc->partial = true;
		rc->lclu = EXT4_B2C(sbi, i);
	}
}
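
/*
 * Worked example (illustrative): with s_cluster_ratio == 4, counting a
 * delonly extent covering blocks 2..9 proceeds as follows. Block 2 lies
 * in cluster 0, which doesn't start on a boundary but is filled through
 * block 3, so cluster 0 is counted. Blocks 4..7 form one whole cluster.
 * Blocks 8..9 leave cluster 2 partially covered, so it is only recorded
 * via rc->partial in case an adjacent delonly extent completes it later.
 * Net effect: rc->ndelonly += 2, with cluster 2 pending.
 */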

/*
 * __pr_tree_search - search for a pending cluster reservation
 *
 * @root - root of pending reservation tree
 * @lclu - logical cluster to search for
 *
 * Returns the pending reservation for the cluster identified by @lclu
 * if found.  If not, returns a reservation for the next cluster if any,
 * and if not, returns NULL.
 */
static struct pending_reservation *__pr_tree_search(struct rb_root *root,
						    ext4_lblk_t lclu)
{
	struct rb_node *node = root->rb_node;
	struct pending_reservation *pr = NULL;

	while (node) {
		pr = rb_entry(node, struct pending_reservation, rb_node);
		if (lclu < pr->lclu)
			node = node->rb_left;
		else if (lclu > pr->lclu)
			node = node->rb_right;
		else
			return pr;
	}

	if (pr && lclu < pr->lclu)
		return pr;

	if (pr && lclu > pr->lclu) {
		node = rb_next(&pr->rb_node);
		return node ? rb_entry(node, struct pending_reservation,
				       rb_node) : NULL;
	}

	return NULL;
}

/*
 * get_rsvd - calculates and returns the number of cluster reservations to be
 *	      released when removing a block range from the extent status tree
 *	      and releases any pending reservations within the range
 *
 * @inode - file containing block range
 * @end - last block in range
 * @right_es - pointer to extent containing next block beyond end or NULL
 * @rc - pointer to reserved count data
 *
 * The number of reservations to be released is equal to the number of
 * clusters containing delayed and not unwritten (delonly) blocks within
 * the range, minus the number of clusters still containing delonly blocks
 * at the ends of the range, and minus the number of pending reservations
 * within the range.
 */
static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
			     struct extent_status *right_es,
			     struct rsvd_count *rc)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct pending_reservation *pr;
	struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
	struct rb_node *node;
	ext4_lblk_t first_lclu, last_lclu;
	bool left_delonly, right_delonly, count_pending;
	struct extent_status *es;

	if (sbi->s_cluster_ratio > 1) {
		/* count any remaining partial cluster */
		if (rc->partial)
			rc->ndelonly++;

		if (rc->ndelonly == 0)
			return 0;

		first_lclu = EXT4_B2C(sbi, rc->first_do_lblk);
		last_lclu = EXT4_B2C(sbi, rc->last_do_lblk);

		/*
		 * decrease the delonly count by the number of clusters at the
		 * ends of the range that still contain delonly blocks -
		 * these clusters still need to be reserved
		 */
		left_delonly = right_delonly = false;

		es = rc->left_es;
		while (es && ext4_es_end(es) >=
		       EXT4_LBLK_CMASK(sbi, rc->first_do_lblk)) {
			if (ext4_es_is_delonly(es)) {
				rc->ndelonly--;
				left_delonly = true;
				break;
			}
			node = rb_prev(&es->rb_node);
			if (!node)
				break;
			es = rb_entry(node, struct extent_status, rb_node);
		}
		if (right_es && (!left_delonly || first_lclu != last_lclu)) {
			if (end < ext4_es_end(right_es)) {
				es = right_es;
			} else {
				node = rb_next(&right_es->rb_node);
				es = node ? rb_entry(node,
						     struct extent_status,
						     rb_node) : NULL;
			}
			while (es && es->es_lblk <=
			       EXT4_LBLK_CFILL(sbi, rc->last_do_lblk)) {
				if (ext4_es_is_delonly(es)) {
					rc->ndelonly--;
					right_delonly = true;
					break;
				}
				node = rb_next(&es->rb_node);
				if (!node)
					break;
				es = rb_entry(node, struct extent_status,
					      rb_node);
			}
		}

		/*
		 * Determine the block range that should be searched for
		 * pending reservations, if any.  Clusters on the ends of the
		 * original removed range containing delonly blocks are
		 * excluded.  They've already been accounted for and it's not
		 * possible to determine if an associated pending reservation
		 * should be released with the information available in the
		 * extents status tree.
		 */
		if (first_lclu == last_lclu) {
			if (left_delonly | right_delonly)
				count_pending = false;
			else
				count_pending = true;
		} else {
			if (left_delonly)
				first_lclu++;
			if (right_delonly)
				last_lclu--;
			if (first_lclu <= last_lclu)
				count_pending = true;
			else
				count_pending = false;
		}

		/*
		 * a pending reservation found between first_lclu and last_lclu
		 * represents an allocated cluster that contained at least one
		 * delonly block, so the delonly total must be reduced by one
		 * for each pending reservation found and released
		 */
		if (count_pending) {
			pr = __pr_tree_search(&tree->root, first_lclu);
			while (pr && pr->lclu <= last_lclu) {
				rc->ndelonly--;
				node = rb_next(&pr->rb_node);
				rb_erase(&pr->rb_node, &tree->root);
				kmem_cache_free(ext4_pending_cachep, pr);
				if (!node)
					break;
				pr = rb_entry(node,
					      struct pending_reservation,
					      rb_node);
			}
		}
	}
	return rc->ndelonly;
}
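
/*
 * Worked example (illustrative): suppose removing a range counted four
 * delonly clusters (rc->ndelonly == 4), one cluster at the left end still
 * contains a delonly block outside the range, and one pending reservation
 * is found and released inside the range. The caller then gets
 * 4 - 1 (left end) - 1 (pending released) = 2 reservations to release.
 */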

/*
 * __es_remove_extent - removes block range from extent status tree
 *
 * @inode - file containing range
 * @lblk - first block in range
 * @end - last block in range
 * @reserved - number of cluster reservations released
 *
 * If @reserved is not NULL and delayed allocation is enabled, counts
 * block/cluster reservations freed by removing range and if bigalloc
 * enabled cancels pending reservations as needed. Returns 0 on success,
 * error code on failure.
 */
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end, int *reserved)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node *node;
	struct extent_status *es;
	struct extent_status orig_es;
	ext4_lblk_t len1, len2;
	ext4_fsblk_t block;
	int err = 0;
	bool count_reserved = true;
	struct rsvd_count rc;

	if (reserved == NULL || !test_opt(inode->i_sb, DELALLOC))
		count_reserved = false;
retry:
	err = 0;

	es = __es_tree_search(&tree->root, lblk);
	if (!es)
		goto out;
	if (es->es_lblk > end)
		goto out;

	/* Simply invalidate cache_es. */
	tree->cache_es = NULL;
	if (count_reserved)
		init_rsvd(inode, lblk, es, &rc);

	orig_es.es_lblk = es->es_lblk;
	orig_es.es_len = es->es_len;
	orig_es.es_pblk = es->es_pblk;

	len1 = lblk > es->es_lblk ? lblk - es->es_lblk : 0;
	len2 = ext4_es_end(es) > end ? ext4_es_end(es) - end : 0;
	if (len1 > 0)
		es->es_len = len1;
	if (len2 > 0) {
		if (len1 > 0) {
			struct extent_status newes;

			newes.es_lblk = end + 1;
			newes.es_len = len2;
			block = 0x7FDEADBEEFULL;
			if (ext4_es_is_written(&orig_es) ||
			    ext4_es_is_unwritten(&orig_es))
				block = ext4_es_pblock(&orig_es) +
					orig_es.es_len - len2;
			ext4_es_store_pblock_status(&newes, block,
						    ext4_es_status(&orig_es));
			err = __es_insert_extent(inode, &newes);
			if (err) {
				es->es_lblk = orig_es.es_lblk;
				es->es_len = orig_es.es_len;
				if ((err == -ENOMEM) &&
				    __es_shrink(EXT4_SB(inode->i_sb),
						128, EXT4_I(inode)))
					goto retry;
				goto out;
			}
		} else {
			es->es_lblk = end + 1;
			es->es_len = len2;
			if (ext4_es_is_written(es) ||
			    ext4_es_is_unwritten(es)) {
				block = orig_es.es_pblk + orig_es.es_len - len2;
				ext4_es_store_pblock(es, block);
			}
		}
		if (count_reserved)
			count_rsvd(inode, lblk, orig_es.es_len - len1 - len2,
				   &orig_es, &rc);
		goto out_get_reserved;
	}

	if (len1 > 0) {
		if (count_reserved)
			count_rsvd(inode, lblk, orig_es.es_len - len1,
				   &orig_es, &rc);
		node = rb_next(&es->rb_node);
		if (node)
			es = rb_entry(node, struct extent_status, rb_node);
		else
			es = NULL;
	}

	while (es && ext4_es_end(es) <= end) {
		if (count_reserved)
			count_rsvd(inode, es->es_lblk, es->es_len, es, &rc);
		node = rb_next(&es->rb_node);
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		if (!node) {
			es = NULL;
			break;
		}
		es = rb_entry(node, struct extent_status, rb_node);
	}

	if (es && es->es_lblk < end + 1) {
		ext4_lblk_t orig_len = es->es_len;

		len1 = ext4_es_end(es) - end;
		if (count_reserved)
			count_rsvd(inode, es->es_lblk, orig_len - len1,
				   es, &rc);
		es->es_lblk = end + 1;
		es->es_len = len1;
		if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) {
			block = es->es_pblk + orig_len - len1;
			ext4_es_store_pblock(es, block);
		}
	}

out_get_reserved:
	if (count_reserved)
		*reserved = get_rsvd(inode, end, es, &rc);
out:
	return err;
}

/*
 * ext4_es_remove_extent - removes block range from extent status tree
 *
 * @inode - file containing range
 * @lblk - first block in range
 * @len - number of blocks to remove
 *
 * Reduces block/cluster reservation count and for bigalloc cancels pending
 * reservations as needed. Returns 0 on success, error code on failure.
 */
int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len)
{
	ext4_lblk_t end;
	int err = 0;
	int reserved = 0;

	trace_ext4_es_remove_extent(inode, lblk, len);
	es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
		 lblk, len, inode->i_ino);

	if (!len)
		return err;

	end = lblk + len - 1;
	BUG_ON(end < lblk);

	/*
	 * ext4_clear_inode() depends on us taking i_es_lock unconditionally
	 * so that we are sure __es_shrink() is done with the inode before it
	 * is reclaimed.
	 */
	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end, &reserved);
	write_unlock(&EXT4_I(inode)->i_es_lock);
	ext4_es_print_tree(inode);
	ext4_da_release_space(inode, reserved);
	return err;
}

static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
		       struct ext4_inode_info *locked_ei)
{
	struct ext4_inode_info *ei;
	struct ext4_es_stats *es_stats;
	ktime_t start_time;
	u64 scan_time;
	int nr_to_walk;
	int nr_shrunk = 0;
	int retried = 0, nr_skipped = 0;

	es_stats = &sbi->s_es_stats;
	start_time = ktime_get();

retry:
	spin_lock(&sbi->s_es_lock);
	nr_to_walk = sbi->s_es_nr_inode;
	while (nr_to_walk-- > 0) {
		if (list_empty(&sbi->s_es_list)) {
			spin_unlock(&sbi->s_es_lock);
			goto out;
		}
		ei = list_first_entry(&sbi->s_es_list, struct ext4_inode_info,
				      i_es_list);
		/* Move the inode to the tail */
		list_move_tail(&ei->i_es_list, &sbi->s_es_list);

		/*
		 * Normally we try hard to avoid shrinking precached inodes,
		 * but we will as a last resort.
		 */
		if (!retried && ext4_test_inode_state(&ei->vfs_inode,
						EXT4_STATE_EXT_PRECACHED)) {
			nr_skipped++;
			continue;
		}

		if (ei == locked_ei || !write_trylock(&ei->i_es_lock)) {
			nr_skipped++;
			continue;
		}
		/*
		 * Now we hold i_es_lock which protects us from inode reclaim
		 * freeing inode under us
		 */
		spin_unlock(&sbi->s_es_lock);

		nr_shrunk += es_reclaim_extents(ei, &nr_to_scan);
		write_unlock(&ei->i_es_lock);

		if (nr_to_scan <= 0)
			goto out;
		spin_lock(&sbi->s_es_lock);
	}
	spin_unlock(&sbi->s_es_lock);

	/*
	 * If we skipped any inodes, and we weren't able to make any
	 * forward progress, try again to scan precached inodes.
	 */
	if ((nr_shrunk == 0) && nr_skipped && !retried) {
		retried++;
		goto retry;
	}

	if (locked_ei && nr_shrunk == 0)
		nr_shrunk = es_reclaim_extents(locked_ei, &nr_to_scan);

out:
	scan_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
	if (likely(es_stats->es_stats_scan_time))
		es_stats->es_stats_scan_time = (scan_time +
				es_stats->es_stats_scan_time*3) / 4;
	else
		es_stats->es_stats_scan_time = scan_time;
	if (scan_time > es_stats->es_stats_max_scan_time)
		es_stats->es_stats_max_scan_time = scan_time;
	if (likely(es_stats->es_stats_shrunk))
		es_stats->es_stats_shrunk = (nr_shrunk +
				es_stats->es_stats_shrunk*3) / 4;
	else
		es_stats->es_stats_shrunk = nr_shrunk;

	trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time,
			     nr_skipped, retried);
	return nr_shrunk;
}
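
/*
 * Worked example (illustrative): the scan-time statistic above is a
 * weighted moving average with a 3/4 decay. If the running average is
 * 8000 ns and the latest scan took 4000 ns:
 *
 *	(4000 + 8000 * 3) / 4 == 7000 ns
 *
 * so one unusually fast (or slow) scan only moves the reported average a
 * quarter of the way toward the new sample.
 */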

static unsigned long ext4_es_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	unsigned long nr;
	struct ext4_sb_info *sbi;

	sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker);
	nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr);
	return nr;
}

static unsigned long ext4_es_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct ext4_sb_info *sbi = container_of(shrink,
					struct ext4_sb_info, s_es_shrinker);
	int nr_to_scan = sc->nr_to_scan;
	int ret, nr_shrunk;

	ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret);

	nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL);

	ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret);
	return nr_shrunk;
}

int ext4_seq_es_shrinker_info_show(struct seq_file *seq, void *v)
{
	struct ext4_sb_info *sbi = EXT4_SB((struct super_block *) seq->private);
	struct ext4_es_stats *es_stats = &sbi->s_es_stats;
	struct ext4_inode_info *ei, *max = NULL;
	unsigned int inode_cnt = 0;

	if (v != SEQ_START_TOKEN)
		return 0;

	/* here we just find an inode that has the max nr. of objects */
	spin_lock(&sbi->s_es_lock);
	list_for_each_entry(ei, &sbi->s_es_list, i_es_list) {
		inode_cnt++;
		if (max && max->i_es_all_nr < ei->i_es_all_nr)
			max = ei;
		else if (!max)
			max = ei;
	}
	spin_unlock(&sbi->s_es_lock);

	seq_printf(seq, "stats:\n  %lld objects\n  %lld reclaimable objects\n",
		   percpu_counter_sum_positive(&es_stats->es_stats_all_cnt),
		   percpu_counter_sum_positive(&es_stats->es_stats_shk_cnt));
	seq_printf(seq, "  %lld/%lld cache hits/misses\n",
		   percpu_counter_sum_positive(&es_stats->es_stats_cache_hits),
		   percpu_counter_sum_positive(&es_stats->es_stats_cache_misses));
	if (inode_cnt)
		seq_printf(seq, "  %d inodes on list\n", inode_cnt);

	seq_printf(seq, "average:\n  %llu us scan time\n",
		   div_u64(es_stats->es_stats_scan_time, 1000));
	seq_printf(seq, "  %lu shrunk objects\n", es_stats->es_stats_shrunk);
	if (inode_cnt)
		seq_printf(seq,
			"maximum:\n  %lu inode (%u objects, %u reclaimable)\n"
			"  %llu us max scan time\n",
			max->vfs_inode.i_ino, max->i_es_all_nr,
			max->i_es_shk_nr,
			div_u64(es_stats->es_stats_max_scan_time, 1000));

	return 0;
}

int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
{
	int err;

	/* Make sure we have enough bits for physical block number */
	BUILD_BUG_ON(ES_SHIFT < 48);
	INIT_LIST_HEAD(&sbi->s_es_list);
	sbi->s_es_nr_inode = 0;
	spin_lock_init(&sbi->s_es_lock);
	sbi->s_es_stats.es_stats_shrunk = 0;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_cache_hits, 0,
				  GFP_KERNEL);
	if (err)
		return err;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_cache_misses, 0,
				  GFP_KERNEL);
	if (err)
		goto err1;
	sbi->s_es_stats.es_stats_scan_time = 0;
	sbi->s_es_stats.es_stats_max_scan_time = 0;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_all_cnt, 0, GFP_KERNEL);
	if (err)
		goto err2;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_shk_cnt, 0, GFP_KERNEL);
	if (err)
		goto err3;

	sbi->s_es_shrinker.scan_objects = ext4_es_scan;
	sbi->s_es_shrinker.count_objects = ext4_es_count;
	sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
	err = register_shrinker(&sbi->s_es_shrinker);
	if (err)
		goto err4;

	return 0;
err4:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
err3:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
err2:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_misses);
err1:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_hits);
	return err;
}

void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_hits);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_misses);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
	unregister_shrinker(&sbi->s_es_shrinker);
}

/*
 * Shrink extents in given inode from ei->i_es_shrink_lblk till end. Scan at
 * most *nr_to_scan extents, update *nr_to_scan accordingly.
 *
 * Return 0 if we hit end of tree / interval, 1 if we exhausted nr_to_scan.
 * Increment *nr_shrunk by the number of reclaimed extents. Also update
 * ei->i_es_shrink_lblk to where we should continue scanning.
 */
static int es_do_reclaim_extents(struct ext4_inode_info *ei, ext4_lblk_t end,
				 int *nr_to_scan, int *nr_shrunk)
{
	struct inode *inode = &ei->vfs_inode;
	struct ext4_es_tree *tree = &ei->i_es_tree;
	struct extent_status *es;
	struct rb_node *node;

	es = __es_tree_search(&tree->root, ei->i_es_shrink_lblk);
	if (!es)
		goto out_wrap;

	while (*nr_to_scan > 0) {
		if (es->es_lblk > end) {
			ei->i_es_shrink_lblk = end + 1;
			return 0;
		}

		(*nr_to_scan)--;
		node = rb_next(&es->rb_node);
		/*
		 * We can't reclaim delayed extents from the status tree
		 * because fiemap, bigalloc, and seek_data/hole need to use
		 * them.
		 */
		if (ext4_es_is_delayed(es))
			goto next;
		if (ext4_es_is_referenced(es)) {
			ext4_es_clear_referenced(es);
			goto next;
		}

		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		(*nr_shrunk)++;
next:
		if (!node)
			goto out_wrap;
		es = rb_entry(node, struct extent_status, rb_node);
	}
	ei->i_es_shrink_lblk = es->es_lblk;
	return 1;
out_wrap:
	ei->i_es_shrink_lblk = 0;
	return 0;
}

static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan)
{
	struct inode *inode = &ei->vfs_inode;
	int nr_shrunk = 0;
	ext4_lblk_t start = ei->i_es_shrink_lblk;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (ei->i_es_shk_nr == 0)
		return 0;

	if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) &&
	    __ratelimit(&_rs))
		ext4_warning(inode->i_sb, "forced shrink of precached extents");

	if (!es_do_reclaim_extents(ei, EXT_MAX_BLOCKS, nr_to_scan, &nr_shrunk) &&
	    start != 0)
		es_do_reclaim_extents(ei, start - 1, nr_to_scan, &nr_shrunk);

	ei->i_es_tree.cache_es = NULL;
	return nr_shrunk;
}

/*
 * Called to support EXT4_IOC_CLEAR_ES_CACHE.  We can only remove
 * discretionary entries from the extent status cache.  (Some entries
 * must be present for proper operations.)
 */
void ext4_clear_inode_es(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct extent_status *es;
	struct ext4_es_tree *tree;
	struct rb_node *node;

	write_lock(&ei->i_es_lock);
	tree = &EXT4_I(inode)->i_es_tree;
	tree->cache_es = NULL;
	node = rb_first(&tree->root);
	while (node) {
		es = rb_entry(node, struct extent_status, rb_node);
		node = rb_next(node);
		if (!ext4_es_is_delayed(es)) {
			rb_erase(&es->rb_node, &tree->root);
			ext4_es_free_extent(inode, es);
		}
	}
	ext4_clear_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
	write_unlock(&ei->i_es_lock);
}

#ifdef ES_DEBUG__
static void ext4_print_pending_tree(struct inode *inode)
{
	struct ext4_pending_tree *tree;
	struct rb_node *node;
	struct pending_reservation *pr;

	printk(KERN_DEBUG "pending reservations for inode %lu:", inode->i_ino);
	tree = &EXT4_I(inode)->i_pending_tree;
	node = rb_first(&tree->root);
	while (node) {
		pr = rb_entry(node, struct pending_reservation, rb_node);
		printk(KERN_DEBUG " %u", pr->lclu);
		node = rb_next(node);
	}
	printk(KERN_DEBUG "\n");
}
#else
#define ext4_print_pending_tree(inode)
#endif

int __init ext4_init_pending(void)
{
	ext4_pending_cachep = kmem_cache_create("ext4_pending_reservation",
					sizeof(struct pending_reservation),
					0, (SLAB_RECLAIM_ACCOUNT), NULL);
	if (ext4_pending_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_pending(void)
{
	kmem_cache_destroy(ext4_pending_cachep);
}

void ext4_init_pending_tree(struct ext4_pending_tree *tree)
{
	tree->root = RB_ROOT;
}

/*
 * __get_pending - retrieve a pointer to a pending reservation
 *
 * @inode - file containing the pending cluster reservation
 * @lclu - logical cluster of interest
 *
 * Returns a pointer to a pending reservation if it's a member of
 * the set, and NULL if not.  Must be called holding i_es_lock.
 */
static struct pending_reservation *__get_pending(struct inode *inode,
						 ext4_lblk_t lclu)
{
	struct ext4_pending_tree *tree;
	struct rb_node *node;
	struct pending_reservation *pr = NULL;

	tree = &EXT4_I(inode)->i_pending_tree;
	node = (&tree->root)->rb_node;

	while (node) {
		pr = rb_entry(node, struct pending_reservation, rb_node);
		if (lclu < pr->lclu)
			node = node->rb_left;
		else if (lclu > pr->lclu)
			node = node->rb_right;
		else if (lclu == pr->lclu)
			return pr;
	}
	return NULL;
}

/*
 * __insert_pending - adds a pending cluster reservation to the set of
 *                    pending reservations
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the cluster to be added
 *
 * Returns 0 on successful insertion and -ENOMEM on failure.  If the
 * pending reservation is already in the set, returns successfully.
 */
static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
	struct rb_node **p = &tree->root.rb_node;
	struct rb_node *parent = NULL;
	struct pending_reservation *pr;
	ext4_lblk_t lclu;
	int ret = 0;

	lclu = EXT4_B2C(sbi, lblk);
	/* search to find parent for insertion */
	while (*p) {
		parent = *p;
		pr = rb_entry(parent, struct pending_reservation, rb_node);

		if (lclu < pr->lclu) {
			p = &(*p)->rb_left;
		} else if (lclu > pr->lclu) {
			p = &(*p)->rb_right;
		} else {
			/* pending reservation already inserted */
			goto out;
		}
	}

	pr = kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
	if (pr == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	pr->lclu = lclu;

	rb_link_node(&pr->rb_node, parent, p);
	rb_insert_color(&pr->rb_node, &tree->root);

out:
	return ret;
}

/*
 * __remove_pending - removes a pending cluster reservation from the set
 *                    of pending reservations
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the pending cluster reservation to be removed
 *
 * Returns successfully if pending reservation is not a member of the set.
 */
static void __remove_pending(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct pending_reservation *pr;
	struct ext4_pending_tree *tree;

	pr = __get_pending(inode, EXT4_B2C(sbi, lblk));
	if (pr != NULL) {
		tree = &EXT4_I(inode)->i_pending_tree;
		rb_erase(&pr->rb_node, &tree->root);
		kmem_cache_free(ext4_pending_cachep, pr);
	}
}

/*
 * ext4_remove_pending - removes a pending cluster reservation from the set
 *                       of pending reservations
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the pending cluster reservation to be removed
 *
 * Locking for external use of __remove_pending.
 */
void ext4_remove_pending(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	write_lock(&ei->i_es_lock);
	__remove_pending(inode, lblk);
	write_unlock(&ei->i_es_lock);
}

/*
 * ext4_is_pending - determine whether a cluster has a pending reservation
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the cluster
 *
 * Returns true if there's a pending reservation for the cluster in the
 * set of pending reservations, and false if not.
 */
bool ext4_is_pending(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	bool ret;

	read_lock(&ei->i_es_lock);
	ret = (bool)(__get_pending(inode, EXT4_B2C(sbi, lblk)) != NULL);
	read_unlock(&ei->i_es_lock);

	return ret;
}

/*
 * ext4_es_insert_delayed_block - adds a delayed block to the extents status
 *                                tree, adding a pending reservation where
 *                                needed
 *
 * @inode - file containing the newly added block
 * @lblk - logical block to be added
 * @allocated - indicates whether a physical cluster has been allocated for
 *              the logical cluster that contains the block
 *
 * Returns 0 on success, negative error code on failure.
 */
int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
				 bool allocated)
{
	struct extent_status newes;
	int err = 0;

	es_debug("add [%u/1) delayed to extent status tree of inode %lu\n",
		 lblk, inode->i_ino);

	newes.es_lblk = lblk;
	newes.es_len = 1;
	ext4_es_store_pblock_status(&newes, ~0, EXTENT_STATUS_DELAYED);
	trace_ext4_es_insert_delayed_block(inode, &newes, allocated);

	ext4_es_insert_extent_check(inode, &newes);

	write_lock(&EXT4_I(inode)->i_es_lock);

	err = __es_remove_extent(inode, lblk, lblk, NULL);
	if (err != 0)
		goto error;
retry:
	err = __es_insert_extent(inode, &newes);
	if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
					  128, EXT4_I(inode)))
		goto retry;
	if (err != 0)
		goto error;

	if (allocated)
		__insert_pending(inode, lblk);

error:
	write_unlock(&EXT4_I(inode)->i_es_lock);

	ext4_es_print_tree(inode);
	ext4_print_pending_tree(inode);

	return err;
}

/*
 * __es_delayed_clu - count number of clusters containing blocks that are
 *                    delayed and not unwritten (delonly)
 *
 * @inode - file containing block range
 * @start - logical block defining start of range
 * @end - logical block defining end of range
 *
 * Returns the number of clusters containing only delayed (not delayed
 * and unwritten) blocks in the range specified by @start and @end.  Any
 * cluster or part of a cluster within the range and containing a delayed
 * and not unwritten block within the range is counted as a whole cluster.
 */
static unsigned int __es_delayed_clu(struct inode *inode, ext4_lblk_t start,
				     ext4_lblk_t end)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct rb_node *node;
	ext4_lblk_t first_lclu, last_lclu;
	unsigned long long last_counted_lclu;
	unsigned int n = 0;

	/* guaranteed to be unequal to any ext4_lblk_t value */
	last_counted_lclu = ~0ULL;

	es = __es_tree_search(&tree->root, start);

	while (es && (es->es_lblk <= end)) {
		if (ext4_es_is_delonly(es)) {
			if (es->es_lblk <= start)
				first_lclu = EXT4_B2C(sbi, start);
			else
				first_lclu = EXT4_B2C(sbi, es->es_lblk);

			if (ext4_es_end(es) >= end)
				last_lclu = EXT4_B2C(sbi, end);
			else
				last_lclu = EXT4_B2C(sbi, ext4_es_end(es));

			if (first_lclu == last_counted_lclu)
				n += last_lclu - first_lclu;
			else
				n += last_lclu - first_lclu + 1;
			last_counted_lclu = last_lclu;
		}
		node = rb_next(&es->rb_node);
		if (!node)
			break;
		es = rb_entry(node, struct extent_status, rb_node);
	}

	return n;
}
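
/*
 * Worked example (illustrative): with s_cluster_ratio == 4 and delonly
 * extents [2/4) and [6/4) in the tree, __es_delayed_clu(inode, 0, 15)
 * proceeds as follows: the first extent covers blocks 2..5, i.e. clusters
 * 0..1, adding 2; the second covers blocks 6..9, i.e. clusters 1..2, but
 * cluster 1 was just counted (last_counted_lclu == 1), so only cluster 2
 * is added. The result is 3 clusters.
 */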

/*
 * ext4_es_delayed_clu - count number of clusters containing blocks that
 *                       are delayed and not unwritten (delonly)
 *
 * @inode - file containing block range
 * @lblk - logical block defining start of range
 * @len - number of blocks in range
 *
 * Locking for external use of __es_delayed_clu().
 */
unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
				 ext4_lblk_t len)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_lblk_t end;
	unsigned int n;

	if (len == 0)
		return 0;

	end = lblk + len - 1;
	WARN_ON(end < lblk);

	read_lock(&ei->i_es_lock);

	n = __es_delayed_clu(inode, lblk, end);

	read_unlock(&ei->i_es_lock);

	return n;
}

/*
 * __revise_pending - makes, cancels, or leaves unchanged pending cluster
 *                    reservations for a specified block range depending
 *                    upon the presence or absence of delayed blocks
 *                    outside the range within clusters at the ends of the
 *                    range
 *
 * @inode - file containing the range
 * @lblk - logical block defining the start of range
 * @len  - length of range in blocks
 *
 * Used after a newly allocated extent is added to the extents status tree.
 * Requires that the extents in the range have either written or unwritten
 * status.  Must be called while holding i_es_lock.
 */
static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
			     ext4_lblk_t len)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t end = lblk + len - 1;
	ext4_lblk_t first, last;
	bool f_del = false, l_del = false;

	if (len == 0)
		return;

	/*
	 * Two cases - block range within single cluster and block range
	 * spanning two or more clusters.  Note that a cluster belonging
	 * to a range starting and/or ending on a cluster boundary is treated
	 * as if it does not contain a delayed extent.  The new range may
	 * have allocated space for previously delayed blocks out to the
	 * cluster boundary, requiring that any pre-existing pending
	 * reservation be canceled.  Because this code only looks at blocks
	 * outside the range, it should revise pending reservations
	 * correctly even if the extent represented by the range can't be
	 * inserted in the extents status tree due to ENOSPC.
	 */
	if (EXT4_B2C(sbi, lblk) == EXT4_B2C(sbi, end)) {
		first = EXT4_LBLK_CMASK(sbi, lblk);
		if (first != lblk)
			f_del = __es_scan_range(inode, &ext4_es_is_delonly,
						first, lblk - 1);
		if (f_del) {
			__insert_pending(inode, first);
		} else {
			last = EXT4_LBLK_CMASK(sbi, end) +
			       sbi->s_cluster_ratio - 1;
			if (last != end)
				l_del = __es_scan_range(inode,
							&ext4_es_is_delonly,
							end + 1, last);
			if (l_del)
				__insert_pending(inode, last);
			else
				__remove_pending(inode, last);
		}
	} else {
		first = EXT4_LBLK_CMASK(sbi, lblk);
		if (first != lblk)
			f_del = __es_scan_range(inode, &ext4_es_is_delonly,
						first, lblk - 1);
		if (f_del)
			__insert_pending(inode, first);
		else
			__remove_pending(inode, first);

		last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1;
		if (last != end)
			l_del = __es_scan_range(inode, &ext4_es_is_delonly,
						end + 1, last);
		if (l_del)
			__insert_pending(inode, last);
		else
			__remove_pending(inode, last);
	}
}
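
/*
 * Worked example (illustrative): with s_cluster_ratio == 4, writing out
 * blocks 5..6 leaves cluster 1 (blocks 4..7) partially covered. If block
 * 4 is still a delonly extent, a pending reservation is (re)inserted for
 * cluster 1, since its reservation must survive until that block is also
 * written; if neither block 4 nor block 7 is delonly, any pending
 * reservation for cluster 1 is canceled.
 */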