// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs extent cache support
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Samsung Electronics
 * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
 *          Chao Yu <chao2.yu@samsung.com>
 *
 * block_age-based extent cache added by:
 * Copyright (c) 2022 xiaomi Co., Ltd.
 *             http://www.xiaomi.com/
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>

bool sanity_check_extent_cache(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct extent_tree *et = fi->extent_tree[EX_READ];
	struct extent_info *ei;

	if (!et)
		return true;

	ei = &et->largest;
	if (!ei->len)
		return true;

	/* Let's drop, if checkpoint got corrupted. */
	if (is_set_ckpt_flags(sbi, CP_ERROR_FLAG)) {
		ei->len = 0;
		et->largest_updated = true;
		return true;
	}

	if (!f2fs_is_valid_blkaddr(sbi, ei->blk, DATA_GENERIC_ENHANCE) ||
		!f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
					DATA_GENERIC_ENHANCE)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) extent info [%u, %u, %u] is incorrect, run fsck to fix",
			  __func__, inode->i_ino,
			  ei->blk, ei->fofs, ei->len);
		return false;
	}
	return true;
}

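/*
 * Fill @ei for the given extent type: read extents carry a start block
 * address (and, with compression, a cluster length that is cleared here
 * unless @keep_clen is set), while block-age extents carry an age and a
 * snapshot of the allocated-blocks counter.
 */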
static void __set_extent_info(struct extent_info *ei,
				unsigned int fofs, unsigned int len,
				block_t blk, bool keep_clen,
				unsigned long age, unsigned long last_blocks,
				enum extent_type type)
{
	ei->fofs = fofs;
	ei->len = len;

	if (type == EX_READ) {
		ei->blk = blk;
		if (keep_clen)
			return;
#ifdef CONFIG_F2FS_FS_COMPRESSION
		ei->c_len = 0;
#endif
	} else if (type == EX_BLOCK_AGE) {
		ei->age = age;
		ei->last_blocks = last_blocks;
	}
}

static bool __init_may_extent_tree(struct inode *inode, enum extent_type type)
{
	if (type == EX_READ)
		return test_opt(F2FS_I_SB(inode), READ_EXTENT_CACHE) &&
			S_ISREG(inode->i_mode);
	if (type == EX_BLOCK_AGE)
		return test_opt(F2FS_I_SB(inode), AGE_EXTENT_CACHE) &&
			(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode));
	return false;
}

static bool __may_extent_tree(struct inode *inode, enum extent_type type)
{
	/*
	 * for recovered files during mount do not create extents
	 * if shrinker is not registered.
	 */
	if (list_empty(&F2FS_I_SB(inode)->s_list))
		return false;

	if (!__init_may_extent_tree(inode, type))
		return false;

	if (type == EX_READ) {
		if (is_inode_flag_set(inode, FI_NO_EXTENT))
			return false;
		if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
				!f2fs_sb_has_readonly(F2FS_I_SB(inode)))
			return false;
	} else if (type == EX_BLOCK_AGE) {
		if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
			return false;
		if (file_is_cold(inode))
			return false;
	}
	return true;
}

static void __try_update_largest_extent(struct extent_tree *et,
						struct extent_node *en)
{
	if (et->type != EX_READ)
		return;
	if (en->ei.len <= et->largest.len)
		return;

	et->largest = en->ei;
	et->largest_updated = true;
}

static bool __is_extent_mergeable(struct extent_info *back,
		struct extent_info *front, enum extent_type type)
{
	if (type == EX_READ) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
		if (back->c_len && back->len != back->c_len)
			return false;
		if (front->c_len && front->len != front->c_len)
			return false;
#endif
		return (back->fofs + back->len == front->fofs &&
				back->blk + back->len == front->blk);
	} else if (type == EX_BLOCK_AGE) {
		return (back->fofs + back->len == front->fofs &&
			abs(back->age - front->age) <= SAME_AGE_REGION &&
			abs(back->last_blocks - front->last_blocks) <=
						SAME_AGE_REGION);
	}
	return false;
}

static bool __is_back_mergeable(struct extent_info *cur,
		struct extent_info *back, enum extent_type type)
{
	return __is_extent_mergeable(back, cur, type);
}

static bool __is_front_mergeable(struct extent_info *cur,
		struct extent_info *front, enum extent_type type)
{
	return __is_extent_mergeable(cur, front, type);
}

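/*
 * Find the extent node covering @fofs, checking the per-tree cached entry
 * before falling back to an rb-tree walk. Returns NULL on a miss.
 */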
static struct extent_node *__lookup_extent_node(struct rb_root_cached *root,
			struct extent_node *cached_en, unsigned int fofs)
{
	struct rb_node *node = root->rb_root.rb_node;
	struct extent_node *en;

	/* check a cached entry */
	if (cached_en && cached_en->ei.fofs <= fofs &&
			cached_en->ei.fofs + cached_en->ei.len > fofs)
		return cached_en;

	/* check rb_tree */
	while (node) {
		en = rb_entry(node, struct extent_node, rb_node);

		if (fofs < en->ei.fofs)
			node = node->rb_left;
		else if (fofs >= en->ei.fofs + en->ei.len)
			node = node->rb_right;
		else
			return en;
	}
	return NULL;
}

/*
 * lookup rb entry in position of @fofs in rb-tree,
 * if hit, return the entry, otherwise, return NULL
 * @prev_ex: extent before fofs
 * @next_ex: extent after fofs
 * @insert_p: insert point for new extent at fofs
 * in order to simplify the insertion after.
 * tree must stay unchanged between lookup and insertion.
 */
static struct extent_node *__lookup_extent_node_ret(struct rb_root_cached *root,
				struct extent_node *cached_en,
				unsigned int fofs,
				struct extent_node **prev_entry,
				struct extent_node **next_entry,
				struct rb_node ***insert_p,
				struct rb_node **insert_parent,
				bool *leftmost)
{
	struct rb_node **pnode = &root->rb_root.rb_node;
	struct rb_node *parent = NULL, *tmp_node;
	struct extent_node *en = cached_en;

	*insert_p = NULL;
	*insert_parent = NULL;
	*prev_entry = NULL;
	*next_entry = NULL;

	if (RB_EMPTY_ROOT(&root->rb_root))
		return NULL;

	if (en && en->ei.fofs <= fofs && en->ei.fofs + en->ei.len > fofs)
		goto lookup_neighbors;

	*leftmost = true;

	while (*pnode) {
		parent = *pnode;
		en = rb_entry(*pnode, struct extent_node, rb_node);

		if (fofs < en->ei.fofs) {
			pnode = &(*pnode)->rb_left;
		} else if (fofs >= en->ei.fofs + en->ei.len) {
			pnode = &(*pnode)->rb_right;
			*leftmost = false;
		} else {
			goto lookup_neighbors;
		}
	}

	*insert_p = pnode;
	*insert_parent = parent;

	en = rb_entry(parent, struct extent_node, rb_node);
	tmp_node = parent;
	if (parent && fofs > en->ei.fofs)
		tmp_node = rb_next(parent);
	*next_entry = rb_entry_safe(tmp_node, struct extent_node, rb_node);

	tmp_node = parent;
	if (parent && fofs < en->ei.fofs)
		tmp_node = rb_prev(parent);
	*prev_entry = rb_entry_safe(tmp_node, struct extent_node, rb_node);
	return NULL;

lookup_neighbors:
	if (fofs == en->ei.fofs) {
		/* lookup prev node for merging backward later */
		tmp_node = rb_prev(&en->rb_node);
		*prev_entry = rb_entry_safe(tmp_node,
					struct extent_node, rb_node);
	}
	if (fofs == en->ei.fofs + en->ei.len - 1) {
		/* lookup next node for merging frontward later */
		tmp_node = rb_next(&en->rb_node);
		*next_entry = rb_entry_safe(tmp_node,
					struct extent_node, rb_node);
	}
	return en;
}

static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;

static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node *parent, struct rb_node **p,
				bool leftmost)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
	struct extent_node *en;

	en = f2fs_kmem_cache_alloc(extent_node_slab, GFP_ATOMIC, false, sbi);
	if (!en)
		return NULL;

	en->ei = *ei;
	INIT_LIST_HEAD(&en->list);
	en->et = et;

	rb_link_node(&en->rb_node, parent, p);
	rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
	atomic_inc(&et->node_cnt);
	atomic_inc(&eti->total_ext_node);
	return en;
}

static void __detach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];

	rb_erase_cached(&en->rb_node, &et->root);
	atomic_dec(&et->node_cnt);
	atomic_dec(&eti->total_ext_node);

	if (et->cached_en == en)
		et->cached_en = NULL;
	kmem_cache_free(extent_node_slab, en);
}

/*
 * Flow to release an extent_node:
 * 1. list_del_init
 * 2. __detach_extent_node
 * 3. kmem_cache_free.
 */
static void __release_extent_node(struct f2fs_sb_info *sbi,
			struct extent_tree *et, struct extent_node *en)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];

	spin_lock(&eti->extent_lock);
	f2fs_bug_on(sbi, list_empty(&en->list));
	list_del_init(&en->list);
	spin_unlock(&eti->extent_lock);

	__detach_extent_node(sbi, et, en);
}

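/*
 * Return the inode's extent tree of @type, allocating and registering a new
 * one in the per-type radix tree if none exists; an existing zombie tree is
 * pulled back off the zombie list instead.
 */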
static struct extent_tree *__grab_extent_tree(struct inode *inode,
						enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree_info *eti = &sbi->extent_tree[type];
	struct extent_tree *et;
	nid_t ino = inode->i_ino;

	mutex_lock(&eti->extent_tree_lock);
	et = radix_tree_lookup(&eti->extent_tree_root, ino);
	if (!et) {
		et = f2fs_kmem_cache_alloc(extent_tree_slab,
					GFP_NOFS, true, NULL);
		f2fs_radix_tree_insert(&eti->extent_tree_root, ino, et);
		memset(et, 0, sizeof(struct extent_tree));
		et->ino = ino;
		et->type = type;
		et->root = RB_ROOT_CACHED;
		et->cached_en = NULL;
		rwlock_init(&et->lock);
		INIT_LIST_HEAD(&et->list);
		atomic_set(&et->node_cnt, 0);
		atomic_inc(&eti->total_ext_tree);
	} else {
		atomic_dec(&eti->total_zombie_tree);
		list_del_init(&et->list);
	}
	mutex_unlock(&eti->extent_tree_lock);

	/* never died until evict_inode */
	F2FS_I(inode)->extent_tree[type] = et;

	return et;
}

static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
					struct extent_tree *et)
{
	struct rb_node *node, *next;
	struct extent_node *en;
	unsigned int count = atomic_read(&et->node_cnt);

	node = rb_first_cached(&et->root);
	while (node) {
		next = rb_next(node);
		en = rb_entry(node, struct extent_node, rb_node);
		__release_extent_node(sbi, et, en);
		node = next;
	}

	return count - atomic_read(&et->node_cnt);
}

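/* Invalidate the cached largest read extent if it overlaps [fofs, fofs + len). */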
static void __drop_largest_extent(struct extent_tree *et,
					pgoff_t fofs, unsigned int len)
{
	if (fofs < et->largest.fofs + et->largest.len &&
			fofs + len > et->largest.fofs) {
		et->largest.len = 0;
		et->largest_updated = true;
	}
}

void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree_info *eti = &sbi->extent_tree[EX_READ];
	struct f2fs_extent *i_ext = &F2FS_INODE(ipage)->i_ext;
	struct extent_tree *et;
	struct extent_node *en;
	struct extent_info ei;

	if (!__may_extent_tree(inode, EX_READ)) {
		/* drop largest read extent */
		if (i_ext && i_ext->len) {
			f2fs_wait_on_page_writeback(ipage, NODE, true, true);
			i_ext->len = 0;
			set_page_dirty(ipage);
		}
		goto out;
	}

	et = __grab_extent_tree(inode, EX_READ);

	if (!i_ext || !i_ext->len)
		goto out;

	get_read_extent_info(&ei, i_ext);

	write_lock(&et->lock);
	if (atomic_read(&et->node_cnt))
		goto unlock_out;

	en = __attach_extent_node(sbi, et, &ei, NULL,
				&et->root.rb_root.rb_node, true);
	if (en) {
		et->largest = en->ei;
		et->cached_en = en;

		spin_lock(&eti->extent_lock);
		list_add_tail(&en->list, &eti->extent_list);
		spin_unlock(&eti->extent_lock);
	}
unlock_out:
	write_unlock(&et->lock);
out:
	if (!F2FS_I(inode)->extent_tree[EX_READ])
		set_inode_flag(inode, FI_NO_EXTENT);
}

void f2fs_init_age_extent_tree(struct inode *inode)
{
	if (!__init_may_extent_tree(inode, EX_BLOCK_AGE))
		return;
	__grab_extent_tree(inode, EX_BLOCK_AGE);
}

void f2fs_init_extent_tree(struct inode *inode)
{
	/* initialize read cache */
	if (__init_may_extent_tree(inode, EX_READ))
		__grab_extent_tree(inode, EX_READ);

	/* initialize block age cache */
	if (__init_may_extent_tree(inode, EX_BLOCK_AGE))
		__grab_extent_tree(inode, EX_BLOCK_AGE);
}

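/*
 * Look up @pgofs in the inode's extent tree of @type. On a hit the extent is
 * copied into @ei, the node's LRU position is refreshed and true is returned.
 */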
static bool __lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
			struct extent_info *ei, enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree_info *eti = &sbi->extent_tree[type];
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	struct extent_node *en;
	bool ret = false;

	if (!et)
		return false;

	trace_f2fs_lookup_extent_tree_start(inode, pgofs, type);

	read_lock(&et->lock);

	if (type == EX_READ &&
			et->largest.fofs <= pgofs &&
			et->largest.fofs + et->largest.len > pgofs) {
		*ei = et->largest;
		ret = true;
		stat_inc_largest_node_hit(sbi);
		goto out;
	}

	en = __lookup_extent_node(&et->root, et->cached_en, pgofs);
	if (!en)
		goto out;

	if (en == et->cached_en)
		stat_inc_cached_node_hit(sbi, type);
	else
		stat_inc_rbtree_node_hit(sbi, type);

	*ei = en->ei;
	spin_lock(&eti->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &eti->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&eti->extent_lock);
	ret = true;
out:
	stat_inc_total_hit(sbi, type);
	read_unlock(&et->lock);

	if (type == EX_READ)
		trace_f2fs_lookup_read_extent_tree_end(inode, pgofs, ei);
	else if (type == EX_BLOCK_AGE)
		trace_f2fs_lookup_age_extent_tree_end(inode, pgofs, ei);
	return ret;
}

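/*
 * Try to fold @ei into its neighbours: grow @prev_ex to cover it and/or pull
 * @next_ex back over it, releasing @prev_ex when both sides merge. Returns
 * the surviving node, or NULL if nothing was mergeable.
 */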
static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct extent_node *prev_ex,
				struct extent_node *next_ex)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
	struct extent_node *en = NULL;

	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei, et->type)) {
		prev_ex->ei.len += ei->len;
		ei = &prev_ex->ei;
		en = prev_ex;
	}

	if (next_ex && __is_front_mergeable(ei, &next_ex->ei, et->type)) {
		next_ex->ei.fofs = ei->fofs;
		next_ex->ei.len += ei->len;
		if (et->type == EX_READ)
			next_ex->ei.blk = ei->blk;
		if (en)
			__release_extent_node(sbi, et, prev_ex);

		en = next_ex;
	}

	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);

	spin_lock(&eti->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &eti->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&eti->extent_lock);
	return en;
}

static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node **insert_p,
				struct rb_node *insert_parent,
				bool leftmost)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
	struct rb_node **p = &et->root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_node *en = NULL;

	if (insert_p && insert_parent) {
		parent = insert_parent;
		p = insert_p;
		goto do_insert;
	}

	leftmost = true;

	/* look up extent_node in the rb tree */
	while (*p) {
		parent = *p;
		en = rb_entry(parent, struct extent_node, rb_node);

		if (ei->fofs < en->ei.fofs) {
			p = &(*p)->rb_left;
		} else if (ei->fofs >= en->ei.fofs + en->ei.len) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			f2fs_bug_on(sbi, 1);
		}
	}

do_insert:
	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);

	/* update in global extent list */
	spin_lock(&eti->extent_lock);
	list_add_tail(&en->list, &eti->extent_list);
	et->cached_en = en;
	spin_unlock(&eti->extent_lock);
	return en;
}

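/*
 * Write the extent described by @tei into the tree: existing nodes in
 * [fofs, fofs + len) are shrunk, split or released first, then the new
 * extent is merged with its neighbours or inserted. Serialized by et->lock.
 */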
static void __update_extent_tree_range(struct inode *inode,
			struct extent_info *tei, enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	struct extent_node *en = NULL, *en1 = NULL;
	struct extent_node *prev_en = NULL, *next_en = NULL;
	struct extent_info ei, dei, prev;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	unsigned int fofs = tei->fofs, len = tei->len;
	unsigned int end = fofs + len;
	bool updated = false;
	bool leftmost = false;

	if (!et)
		return;

	if (type == EX_READ)
		trace_f2fs_update_read_extent_tree_range(inode, fofs, len,
						tei->blk, 0);
	else if (type == EX_BLOCK_AGE)
		trace_f2fs_update_age_extent_tree_range(inode, fofs, len,
						tei->age, tei->last_blocks);

	write_lock(&et->lock);

	if (type == EX_READ) {
		if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
			write_unlock(&et->lock);
			return;
		}

		prev = et->largest;
		dei.len = 0;

		/*
		 * drop largest extent before lookup, in case it's already
		 * been shrunk from extent tree
		 */
		__drop_largest_extent(et, fofs, len);
	}

	/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
	en = __lookup_extent_node_ret(&et->root,
					et->cached_en, fofs,
					&prev_en, &next_en,
					&insert_p, &insert_parent,
					&leftmost);
	if (!en)
		en = next_en;

	/* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
	while (en && en->ei.fofs < end) {
		unsigned int org_end;
		int parts = 0;	/* # of parts current extent split into */

		next_en = en1 = NULL;

		dei = en->ei;
		org_end = dei.fofs + dei.len;
		f2fs_bug_on(sbi, fofs >= org_end);

		if (fofs > dei.fofs && (type != EX_READ ||
				fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN)) {
			en->ei.len = fofs - en->ei.fofs;
			prev_en = en;
			parts = 1;
		}

		if (end < org_end && (type != EX_READ ||
				org_end - end >= F2FS_MIN_EXTENT_LEN)) {
			if (parts) {
				__set_extent_info(&ei,
					end, org_end - end,
					end - dei.fofs + dei.blk, false,
					dei.age, dei.last_blocks,
					type);
				en1 = __insert_extent_tree(sbi, et, &ei,
							NULL, NULL, true);
				next_en = en1;
			} else {
				__set_extent_info(&en->ei,
					end, en->ei.len - (end - dei.fofs),
					en->ei.blk + (end - dei.fofs), true,
					dei.age, dei.last_blocks,
					type);
				next_en = en;
			}
			parts++;
		}

		if (!next_en) {
			struct rb_node *node = rb_next(&en->rb_node);

			next_en = rb_entry_safe(node, struct extent_node,
						rb_node);
		}

		if (parts)
			__try_update_largest_extent(et, en);
		else
			__release_extent_node(sbi, et, en);

		/*
		 * if original extent is split into zero or two parts, extent
		 * tree has been altered by deletion or insertion, therefore
		 * invalidate pointers regard to tree.
		 */
		if (parts != 1) {
			insert_p = NULL;
			insert_parent = NULL;
		}
		en = next_en;
	}

	if (type == EX_BLOCK_AGE)
		goto update_age_extent_cache;

	/* 3. update extent in read extent cache */
	BUG_ON(type != EX_READ);

	if (tei->blk) {
		__set_extent_info(&ei, fofs, len, tei->blk, false,
				  0, 0, EX_READ);
		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
			__insert_extent_tree(sbi, et, &ei,
					insert_p, insert_parent, leftmost);

		/* give up extent_cache, if split and small updates happen */
		if (dei.len >= 1 &&
				prev.len < F2FS_MIN_EXTENT_LEN &&
				et->largest.len < F2FS_MIN_EXTENT_LEN) {
			et->largest.len = 0;
			et->largest_updated = true;
			set_inode_flag(inode, FI_NO_EXTENT);
		}
	}

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		__free_extent_tree(sbi, et);

	if (et->largest_updated) {
		et->largest_updated = false;
		updated = true;
	}
	goto out_read_extent_cache;
update_age_extent_cache:
	if (!tei->last_blocks)
		goto out_read_extent_cache;

	__set_extent_info(&ei, fofs, len, 0, false,
			tei->age, tei->last_blocks, EX_BLOCK_AGE);
	if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
		__insert_extent_tree(sbi, et, &ei,
					insert_p, insert_parent, leftmost);
out_read_extent_cache:
	write_unlock(&et->lock);

	if (updated)
		f2fs_mark_inode_dirty_sync(inode, true);
}

#ifdef CONFIG_F2FS_FS_COMPRESSION
void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
				pgoff_t fofs, block_t blkaddr, unsigned int llen,
				unsigned int c_len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];
	struct extent_node *en = NULL;
	struct extent_node *prev_en = NULL, *next_en = NULL;
	struct extent_info ei;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	bool leftmost = false;

	trace_f2fs_update_read_extent_tree_range(inode, fofs, llen,
						blkaddr, c_len);

	/* it is safe here to check FI_NO_EXTENT w/o et->lock in ro image */
	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return;

	write_lock(&et->lock);

	en = __lookup_extent_node_ret(&et->root,
					et->cached_en, fofs,
					&prev_en, &next_en,
					&insert_p, &insert_parent,
					&leftmost);
	if (en)
		goto unlock_out;

	__set_extent_info(&ei, fofs, llen, blkaddr, true, 0, 0, EX_READ);
	ei.c_len = c_len;

	if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
		__insert_extent_tree(sbi, et, &ei,
				insert_p, insert_parent, leftmost);
unlock_out:
	write_unlock(&et->lock);
}
#endif

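/*
 * Blend the new and old ages using sbi->last_age_weight as a percentage,
 * roughly res = new * (100 - weight) / 100 + old * weight / 100, computed
 * on quotients and remainders separately to avoid overflow. For example,
 * with weight 30, new 1000 and old 100, the result is 730.
 */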
static unsigned long long __calculate_block_age(struct f2fs_sb_info *sbi,
						unsigned long long new,
						unsigned long long old)
{
	unsigned int rem_old, rem_new;
	unsigned long long res;
	unsigned int weight = sbi->last_age_weight;

	res = div_u64_rem(new, 100, &rem_new) * (100 - weight)
		+ div_u64_rem(old, 100, &rem_old) * weight;

	if (rem_new)
		res += rem_new * (100 - weight) / 100;
	if (rem_old)
		res += rem_old * weight / 100;

	return res;
}

/* This returns a new age and allocated blocks in ei */
static int __get_new_block_age(struct inode *inode, struct extent_info *ei,
						block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t f_size = i_size_read(inode);
	unsigned long long cur_blocks =
				atomic64_read(&sbi->allocated_data_blocks);
	struct extent_info tei = *ei;	/* only fofs and len are valid */

	/*
	 * When I/O is not aligned to a PAGE_SIZE, update will happen to the last
	 * file block even in seq write. So don't record age for newly last file
	 * block here.
	 */
	if ((f_size >> PAGE_SHIFT) == ei->fofs && f_size & (PAGE_SIZE - 1) &&
			blkaddr == NEW_ADDR)
		return -EINVAL;

	if (__lookup_extent_tree(inode, ei->fofs, &tei, EX_BLOCK_AGE)) {
		unsigned long long cur_age;

		if (cur_blocks >= tei.last_blocks)
			cur_age = cur_blocks - tei.last_blocks;
		else
			/* allocated_data_blocks overflow */
			cur_age = ULLONG_MAX - tei.last_blocks + cur_blocks;

		if (tei.age)
			ei->age = __calculate_block_age(sbi, cur_age, tei.age);
		else
			ei->age = cur_age;
		ei->last_blocks = cur_blocks;
		WARN_ON(ei->age > cur_blocks);
		return 0;
	}

	f2fs_bug_on(sbi, blkaddr == NULL_ADDR);

	/* the data block was allocated for the first time */
	if (blkaddr == NEW_ADDR)
		goto out;

	if (__is_valid_data_blkaddr(blkaddr) &&
		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
		f2fs_bug_on(sbi, 1);
		return -EINVAL;
	}
out:
	/*
	 * init block age with zero, this can happen when the block age extent
	 * was reclaimed due to memory constraint or system reboot
	 */
	ei->age = 0;
	ei->last_blocks = cur_blocks;
	return 0;
}

static void __update_extent_cache(struct dnode_of_data *dn, enum extent_type type)
{
	struct extent_info ei = {};

	if (!__may_extent_tree(dn->inode, type))
		return;

	ei.fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
								dn->ofs_in_node;
	ei.len = 1;

	if (type == EX_READ) {
		if (dn->data_blkaddr == NEW_ADDR)
			ei.len = 0;
		else
			ei.blk = dn->data_blkaddr;
	} else if (type == EX_BLOCK_AGE) {
		if (__get_new_block_age(dn->inode, &ei, dn->data_blkaddr))
			return;
	}
	__update_extent_tree_range(dn->inode, &ei, type);
}

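/*
 * Reclaim up to @nr_shrink objects of @type: whole zombie trees of evicted
 * inodes first, then the coldest nodes on the global LRU list. Locks are
 * only taken with trylock so shrinking backs off instead of stalling.
 */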
static unsigned int __shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink,
					enum extent_type type)
{
	struct extent_tree_info *eti = &sbi->extent_tree[type];
	struct extent_tree *et, *next;
	struct extent_node *en;
	unsigned int node_cnt = 0, tree_cnt = 0;
	int remained;

	if (!atomic_read(&eti->total_zombie_tree))
		goto free_node;

	if (!mutex_trylock(&eti->extent_tree_lock))
		goto out;

	/* 1. remove unreferenced extent tree */
	list_for_each_entry_safe(et, next, &eti->zombie_list, list) {
		if (atomic_read(&et->node_cnt)) {
			write_lock(&et->lock);
			node_cnt += __free_extent_tree(sbi, et);
			write_unlock(&et->lock);
		}
		f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
		list_del_init(&et->list);
		radix_tree_delete(&eti->extent_tree_root, et->ino);
		kmem_cache_free(extent_tree_slab, et);
		atomic_dec(&eti->total_ext_tree);
		atomic_dec(&eti->total_zombie_tree);
		tree_cnt++;

		if (node_cnt + tree_cnt >= nr_shrink)
			goto unlock_out;
		cond_resched();
	}
	mutex_unlock(&eti->extent_tree_lock);

free_node:
	/* 2. remove LRU extent entries */
	if (!mutex_trylock(&eti->extent_tree_lock))
		goto out;

	remained = nr_shrink - (node_cnt + tree_cnt);

	spin_lock(&eti->extent_lock);
	for (; remained > 0; remained--) {
		if (list_empty(&eti->extent_list))
			break;
		en = list_first_entry(&eti->extent_list,
					struct extent_node, list);
		et = en->et;
		if (!write_trylock(&et->lock)) {
			/* refresh this extent node's position in extent list */
			list_move_tail(&en->list, &eti->extent_list);
			continue;
		}

		list_del_init(&en->list);
		spin_unlock(&eti->extent_lock);

		__detach_extent_node(sbi, et, en);

		write_unlock(&et->lock);
		node_cnt++;
		spin_lock(&eti->extent_lock);
	}
	spin_unlock(&eti->extent_lock);

unlock_out:
	mutex_unlock(&eti->extent_tree_lock);
out:
	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt, type);

	return node_cnt + tree_cnt;
}

/* read extent cache operations */
bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs,
				struct extent_info *ei)
{
	if (!__may_extent_tree(inode, EX_READ))
		return false;

	return __lookup_extent_tree(inode, pgofs, ei, EX_READ);
}

bool f2fs_lookup_read_extent_cache_block(struct inode *inode, pgoff_t index,
				block_t *blkaddr)
{
	struct extent_info ei = {};

	if (!f2fs_lookup_read_extent_cache(inode, index, &ei))
		return false;
	*blkaddr = ei.blk + index - ei.fofs;
	return true;
}

void f2fs_update_read_extent_cache(struct dnode_of_data *dn)
{
	return __update_extent_cache(dn, EX_READ);
}

void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn,
				pgoff_t fofs, block_t blkaddr, unsigned int len)
{
	struct extent_info ei = {
		.fofs = fofs,
		.len = len,
		.blk = blkaddr,
	};

	if (!__may_extent_tree(dn->inode, EX_READ))
		return;

	__update_extent_tree_range(dn->inode, &ei, EX_READ);
}

unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
	if (!test_opt(sbi, READ_EXTENT_CACHE))
		return 0;

	return __shrink_extent_tree(sbi, nr_shrink, EX_READ);
}

/* block age extent cache operations */
bool f2fs_lookup_age_extent_cache(struct inode *inode, pgoff_t pgofs,
				struct extent_info *ei)
{
	if (!__may_extent_tree(inode, EX_BLOCK_AGE))
		return false;

	return __lookup_extent_tree(inode, pgofs, ei, EX_BLOCK_AGE);
}

void f2fs_update_age_extent_cache(struct dnode_of_data *dn)
{
	return __update_extent_cache(dn, EX_BLOCK_AGE);
}

void f2fs_update_age_extent_cache_range(struct dnode_of_data *dn,
				pgoff_t fofs, unsigned int len)
{
	struct extent_info ei = {
		.fofs = fofs,
		.len = len,
	};

	if (!__may_extent_tree(dn->inode, EX_BLOCK_AGE))
		return;

	__update_extent_tree_range(dn->inode, &ei, EX_BLOCK_AGE);
}

unsigned int f2fs_shrink_age_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
	if (!test_opt(sbi, AGE_EXTENT_CACHE))
		return 0;

	return __shrink_extent_tree(sbi, nr_shrink, EX_BLOCK_AGE);
}

static unsigned int __destroy_extent_node(struct inode *inode,
					enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	unsigned int node_cnt = 0;

	if (!et || !atomic_read(&et->node_cnt))
		return 0;

	write_lock(&et->lock);
	node_cnt = __free_extent_tree(sbi, et);
	write_unlock(&et->lock);

	return node_cnt;
}

void f2fs_destroy_extent_node(struct inode *inode)
{
	__destroy_extent_node(inode, EX_READ);
	__destroy_extent_node(inode, EX_BLOCK_AGE);
}

static void __drop_extent_tree(struct inode *inode, enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	bool updated = false;

	if (!__may_extent_tree(inode, type))
		return;

	write_lock(&et->lock);
	__free_extent_tree(sbi, et);
	if (type == EX_READ) {
		set_inode_flag(inode, FI_NO_EXTENT);
		if (et->largest.len) {
			et->largest.len = 0;
			updated = true;
		}
	}
	write_unlock(&et->lock);
	if (updated)
		f2fs_mark_inode_dirty_sync(inode, true);
}

void f2fs_drop_extent_tree(struct inode *inode)
{
	__drop_extent_tree(inode, EX_READ);
	__drop_extent_tree(inode, EX_BLOCK_AGE);
}

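/*
 * Tear down the extent tree at inode eviction: a still-linked inode that
 * still holds nodes is parked on the zombie list for the shrinker, otherwise
 * the tree is freed and unhooked from the radix tree.
 */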
static void __destroy_extent_tree(struct inode *inode, enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree_info *eti = &sbi->extent_tree[type];
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	unsigned int node_cnt = 0;

	if (!et)
		return;

	if (inode->i_nlink && !is_bad_inode(inode) &&
					atomic_read(&et->node_cnt)) {
		mutex_lock(&eti->extent_tree_lock);
		list_add_tail(&et->list, &eti->zombie_list);
		atomic_inc(&eti->total_zombie_tree);
		mutex_unlock(&eti->extent_tree_lock);
		return;
	}

	/* free all extent info belong to this extent tree */
	node_cnt = __destroy_extent_node(inode, type);

	/* delete extent tree entry in radix tree */
	mutex_lock(&eti->extent_tree_lock);
	f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
	radix_tree_delete(&eti->extent_tree_root, inode->i_ino);
	kmem_cache_free(extent_tree_slab, et);
	atomic_dec(&eti->total_ext_tree);
	mutex_unlock(&eti->extent_tree_lock);

	F2FS_I(inode)->extent_tree[type] = NULL;

	trace_f2fs_destroy_extent_tree(inode, node_cnt, type);
}

void f2fs_destroy_extent_tree(struct inode *inode)
{
	__destroy_extent_tree(inode, EX_READ);
	__destroy_extent_tree(inode, EX_BLOCK_AGE);
}

static void __init_extent_tree_info(struct extent_tree_info *eti)
{
	INIT_RADIX_TREE(&eti->extent_tree_root, GFP_NOIO);
	mutex_init(&eti->extent_tree_lock);
	INIT_LIST_HEAD(&eti->extent_list);
	spin_lock_init(&eti->extent_lock);
	atomic_set(&eti->total_ext_tree, 0);
	INIT_LIST_HEAD(&eti->zombie_list);
	atomic_set(&eti->total_zombie_tree, 0);
	atomic_set(&eti->total_ext_node, 0);
}

void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi)
{
	__init_extent_tree_info(&sbi->extent_tree[EX_READ]);
	__init_extent_tree_info(&sbi->extent_tree[EX_BLOCK_AGE]);

	/* initialize for block age extents */
	atomic64_set(&sbi->allocated_data_blocks, 0);
	sbi->hot_data_age_threshold = DEF_HOT_DATA_AGE_THRESHOLD;
	sbi->warm_data_age_threshold = DEF_WARM_DATA_AGE_THRESHOLD;
	sbi->last_age_weight = LAST_AGE_WEIGHT;
}

int __init f2fs_create_extent_cache(void)
{
	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
			sizeof(struct extent_tree));
	if (!extent_tree_slab)
		return -ENOMEM;
	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
			sizeof(struct extent_node));
	if (!extent_node_slab) {
		kmem_cache_destroy(extent_tree_slab);
		return -ENOMEM;
	}
	return 0;
}

void f2fs_destroy_extent_cache(void)
{
	kmem_cache_destroy(extent_node_slab);
	kmem_cache_destroy(extent_tree_slab);
}