// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *victim_entry_slab;

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len);
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	unsigned int wait_ms;

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		bool sync_mode;

		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = 0;

		if (try_to_freeze()) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false);
		}

		if (!sb_start_write_trylock(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs frequently,
		 * because some segments can be invalidated soon after by user
		 * update or deletion. So, we'd like to wait some time to
		 * collect dirty segments.
		 */
		if (sbi->gc_mode == GC_URGENT_HIGH) {
			wait_ms = gc_th->urgent_sleep_time;
			down_write(&sbi->gc_lock);
			goto do_gc;
		}

		if (!down_write_trylock(&sbi->gc_lock)) {
			stat_other_skip_bggc_count(sbi);
			goto next;
		}

		if (!is_idle(sbi, GC_TIME)) {
			increase_sleep_time(gc_th, &wait_ms);
			up_write(&sbi->gc_lock);
			stat_io_skip_bggc_count(sbi);
			goto next;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
		stat_inc_bggc_count(sbi->stat_info);

		sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, sync_mode, true, false, NULL_SEGNO))
			wait_ms = gc_th->no_gc_sleep_time;

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi, true);
next:
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}
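/*
 * A sketch of how the polling interval above evolves, assuming the default
 * DEF_GC_THREAD_* values from gc.h (names and numbers are those defaults,
 * not anything new to this file):
 *
 *   idle and enough invalid blocks -> decrease_sleep_time(): wait_ms moves
 *                                     toward min_sleep_time (30s)
 *   busy I/O or few invalid blocks -> increase_sleep_time(): wait_ms moves
 *                                     toward max_sleep_time (60s)
 *   no victim selected             -> wait_ms = no_gc_sleep_time (300s)
 *   gc_mode == GC_URGENT_HIGH      -> wait_ms = urgent_sleep_time (500ms)
 */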
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_wake = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}
void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}
static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode;

	if (gc_type == BG_GC) {
		if (sbi->am.atgc_enabled)
			gc_mode = GC_AT;
		else
			gc_mode = GC_CB;
	} else {
		gc_mode = GC_GREEDY;
	}

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT_HIGH:
		gc_mode = GC_GREEDY;
		break;
	case GC_IDLE_AT:
		gc_mode = GC_AT;
		break;
	}

	return gc_mode;
}
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else if (p->alloc_mode == AT_SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->ofs_unit = sbi->segs_per_sec;
		if (__is_large_section(sbi)) {
			p->dirty_bitmap = dirty_i->dirty_secmap;
			p->max_search = count_bits(p->dirty_bitmap,
						0, MAIN_SECS(sbi));
		} else {
			p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
			p->max_search = dirty_i->nr_dirty[DIRTY];
		}
	}

	/*
	 * adjust candidates range, should select all dirty segments for
	 * foreground GC and urgent GC cases.
	 */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT_HIGH) &&
			(p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select beginning hot/small space first in no_heap mode */
	if (test_opt(sbi, NOHEAP) &&
			(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	else if (p->alloc_mode == AT_SSR)
		return UINT_MAX;

	/* LFS */
	if (p->gc_mode == GC_GREEDY)
		return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else if (p->gc_mode == GC_AT)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}
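/*
 * For illustration (a reading of the bounds above, not from the original
 * source): under GC_GREEDY the cost of a candidate is its valid block
 * count, which can never exceed blocks_per_seg * ofs_unit for a section of
 * ofs_unit segments, so the doubled value is simply a "worse than any real
 * candidate" initializer for p->min_cost.  GC_CB and GC_AT costs are
 * computed as UINT_MAX minus a gain, hence UINT_MAX as their bound.
 */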
static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can select victim segments
	 * selected by background GC before.
	 * Those segments are guaranteed to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;
	unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno);

	for (i = 0; i < usable_segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, usable_segs_per_sec);
	vblocks = div_u64(vblocks, usable_segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle if the system time has changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}
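/*
 * Worked example of the cost-benefit value above (illustrative numbers,
 * not from the original source): with log_blocks_per_seg = 9 (512 blocks
 * per segment) and an average of 256 valid blocks per segment,
 * u = (256 * 100) >> 9 = 50.  If the section's average mtime sits midway
 * between min_mtime and max_mtime, age = 100 - 50 = 50, and the result is
 * UINT_MAX - (100 * (100 - 50) * 50) / (100 + 50) = UINT_MAX - 1666.
 * Lower utilization and older data yield a smaller cost, i.e. a more
 * attractive victim.
 */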
static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else if (p->gc_mode == GC_CB)
		return get_cb_cost(sbi, segno);

	f2fs_bug_on(sbi, 1);
	return 0;
}
static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}
static struct victim_entry *attach_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno,
				struct rb_node *parent, struct rb_node **p,
				bool left_most)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;

	ve = f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS);

	ve->mtime = mtime;
	ve->segno = segno;

	rb_link_node(&ve->rb_node, parent, p);
	rb_insert_color_cached(&ve->rb_node, &am->root, left_most);

	list_add_tail(&ve->list, &am->victim_list);

	am->victim_count++;

	return ve;
}
static void insert_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	bool left_most = true;

	p = f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, mtime, &left_most);
	attach_victim_entry(sbi, mtime, segno, parent, p, left_most);
}
static void add_victim_entry(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int i;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (p->gc_mode == GC_AT &&
			get_valid_blocks(sbi, segno, true) == 0)
			return;
	}

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	mtime = div_u64(mtime, sbi->segs_per_sec);

	/* Handle if the system time has changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (mtime < sit_i->dirty_min_mtime)
		sit_i->dirty_min_mtime = mtime;
	if (mtime > sit_i->dirty_max_mtime)
		sit_i->dirty_max_mtime = mtime;

	/* don't choose young section as candidate */
	if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
		return;

	insert_victim_entry(sbi, mtime, segno);
}
static struct rb_node *lookup_central_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node *parent = NULL;
	bool left_most;

	f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, p->age, &left_most);

	return parent;
}
static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node *node;
	struct rb_entry *re;
	struct victim_entry *ve;
	unsigned long long total_time;
	unsigned long long age, u, accu;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int sec_blocks = BLKS_PER_SEC(sbi);
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int age_weight = am->age_weight;
	unsigned int cost;
	unsigned int iter = 0;

	if (max_mtime < min_mtime)
		return;

	max_mtime += 1;
	total_time = max_mtime - min_mtime;

	accu = div64_u64(ULLONG_MAX, total_time);
	accu = min_t(unsigned long long, div_u64(accu, 100),
					DEFAULT_ACCURACY_CLASS);

	node = rb_first_cached(root);
next:
	re = rb_entry_safe(node, struct rb_entry, rb_node);
	if (!re)
		return;

	ve = (struct victim_entry *)re;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip;

	/* age = 10000 * x% * 60 */
	age = div64_u64(accu * (max_mtime - ve->mtime), total_time) *
								age_weight;

	vblocks = get_valid_blocks(sbi, ve->segno, true);
	f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);

	/* u = 10000 * x% * 40 */
	u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) *
							(100 - age_weight);

	f2fs_bug_on(sbi, age + u >= UINT_MAX);

	cost = UINT_MAX - (age + u);
	iter++;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip:
	if (iter < dirty_threshold) {
		node = rb_next(node);
		goto next;
	}
}
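/*
 * Worked example of the weighted cost above (illustrative, assuming the
 * default age_weight of 60 from DEF_GC_THREAD_AGE_WEIGHT): accu rescales
 * both terms into a common fixed-point range, so that
 *   age = accu * (section age as a fraction of total_time) * 60
 *   u   = accu * (invalid, i.e. reclaimable, block ratio)  * 40
 * A section that is both old (large age) and mostly invalid (large u)
 * maximizes age + u, so cost = UINT_MAX - (age + u) is smallest and that
 * section wins the comparison against p->min_cost.
 */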
/*
 * select candidates around source section in range of
 * [target - dirty_threshold, target + dirty_threshold]
 */
static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_node *node;
	struct rb_entry *re;
	struct victim_entry *ve;
	unsigned long long age;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int seg_blocks = sbi->blocks_per_seg;
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int cost;
	unsigned int iter = 0;
	int stage = 0;

	if (max_mtime < min_mtime)
		return;
	max_mtime += 1;
next_stage:
	node = lookup_central_victim(sbi, p);
next_node:
	re = rb_entry_safe(node, struct rb_entry, rb_node);
	if (!re)
		goto skip_stage;

	ve = (struct victim_entry *)re;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip_node;

	age = max_mtime - ve->mtime;

	vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
	f2fs_bug_on(sbi, !vblocks);

	/* rare case */
	if (vblocks == seg_blocks)
		goto skip_node;

	iter++;

	age = max_mtime - abs(p->age - age);
	cost = UINT_MAX - vblocks;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip_node:
	if (iter < dirty_threshold) {
		if (stage == 0)
			node = rb_prev(node);
		else if (stage == 1)
			node = rb_next(node);
		goto next_node;
	}
skip_stage:
	if (stage < 1) {
		stage++;
		iter = 0;
		goto next_stage;
	}
}
static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
						&sbi->am.root, true));

	if (p->gc_mode == GC_AT)
		atgc_lookup_victim(sbi, p);
	else if (p->alloc_mode == AT_SSR)
		atssr_lookup_victim(sbi, p);
	else
		f2fs_bug_on(sbi, 1);
}
static void release_victim_entry(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve, *tmp;

	list_for_each_entry_safe(ve, tmp, &am->victim_list, list) {
		list_del(&ve->list);
		kmem_cache_free(victim_entry_slab, ve);
		am->victim_count--;
	}

	am->root = RB_ROOT_CACHED;

	f2fs_bug_on(sbi, am->victim_count);
	f2fs_bug_on(sbi, !list_empty(&am->victim_list));
}
/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
			unsigned int *result, int gc_type, int type,
			char alloc_mode, unsigned long long age)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment;
	unsigned int nsearched;
	bool is_atgc;
	int ret = 0;

	mutex_lock(&dirty_i->seglist_lock);
	last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec;

	p.alloc_mode = alloc_mode;
	p.age = age;
	p.age_threshold = sbi->am.age_threshold;

retry:
	select_policy(sbi, gc_type, type, &p);
	p.min_segno = NULL_SEGNO;
	p.oldest_age = 0;
	p.min_cost = get_max_cost(sbi, &p);

	is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR);
	nsearched = 0;

	if (is_atgc)
		SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;

	if (*result != NULL_SEGNO) {
		if (!get_valid_blocks(sbi, *result, false)) {
			ret = -ENODATA;
			goto out;
		}

		if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			ret = -EBUSY;
		else
			p.min_segno = *result;
		goto out;
	}

	ret = -ENODATA;
	if (p.max_search == 0)
		goto out;

	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[BG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
			goto got_result;
		}
		if (gc_type == FG_GC &&
				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[FG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
			goto got_result;
		}
	}

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost, *dirty_bitmap;
		unsigned int unit_no, segno;

		dirty_bitmap = p.dirty_bitmap;
		unit_no = find_next_bit(dirty_bitmap,
				last_segment / p.ofs_unit,
				p.offset / p.ofs_unit);
		segno = unit_no * p.ofs_unit;
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		nsearched++;

#ifdef CONFIG_F2FS_CHECK_FS
		/*
		 * skip selecting the invalid segno (that is failed due to block
		 * validity check failure during GC) to avoid endless GC loop in
		 * such cases.
		 */
		if (test_bit(segno, sm->invalid_segmap))
			goto next;
#endif

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;

		/* Don't touch checkpointed data */
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
			if (p.alloc_mode == LFS) {
				/*
				 * LFS is set to find source section during GC.
				 * The victim should have no checkpointed data.
				 */
				if (get_ckpt_valid_blocks(sbi, segno, true))
					goto next;
			} else {
				/*
				 * SSR | AT_SSR are set to find target segment
				 * for writes which can be full by checkpointed
				 * and newly written blocks.
				 */
				if (!f2fs_segment_has_free_slot(sbi, segno))
					goto next;
			}
		}

		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		if (is_atgc) {
			add_victim_entry(sbi, &p, segno);
			goto next;
		}

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] =
					last_victim + p.ofs_unit;
			else
				sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
			sm->last_victim[p.gc_mode] %=
				(MAIN_SECS(sbi) * sbi->segs_per_sec);
			break;
		}
	}

	/* get victim for GC_AT/AT_SSR */
	if (is_atgc) {
		lookup_victim_by_age(sbi, &p);
		release_victim_entry(sbi);
	}

	if (is_atgc && p.min_segno == NULL_SEGNO &&
			sm->elapsed_time < p.age_threshold) {
		p.age_threshold = 0;
		goto retry;
	}

	if (p.min_segno != NULL_SEGNO) {
got_it:
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
got_result:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		ret = 0;
	}
out:
	if (p.min_segno != NULL_SEGNO)
		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	mutex_unlock(&dirty_i->seglist_lock);

	return ret;
}
static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};
static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}
static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}
static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(f2fs_inode_entry_slab, ie);
	}
}
static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}
/*
 * This function compares node address got in summary with that in NAT.
 * On validity, copy that node with cold status, otherwise (invalid node)
 * ignore that.
 */
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;
		int err;

		/* stop BG_GC if there is not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = f2fs_get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during f2fs_get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (f2fs_get_node_info(sbi, nid, &ni)) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		err = f2fs_move_node_page(node_page, gc_type);
		if (!err && gc_type == FG_GC)
			submitted++;
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return submitted;
}
/*
 * Calculate start block index indicating the given node offset.
 * Be careful, caller should give this node offset only indicating direct node
 * blocks. If any node offsets, which point the other types of node blocks such
 * as indirect or double indirect node blocks, are given, it must be a caller's
 * bug.
 */
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
}
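/*
 * Worked example (illustrative, assuming the usual NIDS_PER_BLOCK = 1018
 * for 4KB blocks): node offset 0 is the inode itself, offsets 1 and 2 are
 * the two direct node blocks (bidx 0 and 1), offset 3 is the first
 * indirect node block, which carries no data addresses of its own, and so
 * on up to indirect_blks = 2 * 1018 + 4.  The "dec" terms subtract the
 * indirect/double-indirect node blocks interleaved in the offset space,
 * so bidx counts direct node blocks only; the final return scales bidx by
 * ADDRS_PER_BLOCK() and adds the data addresses stored in the inode block.
 */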
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node, max_addrs, base;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	if (f2fs_get_node_info(sbi, nid, dni)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (sum->version != dni->version) {
		f2fs_warn(sbi, "%s: valid data with mismatched node version.",
			  __func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	if (f2fs_check_nid_range(sbi, dni->ino)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (IS_INODE(node_page)) {
		base = offset_in_addr(F2FS_INODE(node_page));
		max_addrs = DEF_ADDRS_PER_INODE;
	} else {
		base = 0;
		max_addrs = DEF_ADDRS_PER_BLOCK;
	}

	if (base + ofs_in_node >= max_addrs) {
		f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u",
			base, ofs_in_node, max_addrs, dni->ino, dni->nid);
		f2fs_put_page(node_page, 1);
		return false;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr) {
#ifdef CONFIG_F2FS_CHECK_FS
		unsigned int segno = GET_SEGNO(sbi, blkaddr);
		unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

		if (unlikely(check_valid_map(sbi, segno, offset))) {
			if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
				f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
					 blkaddr, source_blkaddr, segno);
				f2fs_bug_on(sbi, 1);
			}
		}
#endif
		return false;
	}
	return true;
}
static int ra_data_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0, 0, 0};
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page)
		return -ENOMEM;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ))) {
			err = -EFSCORRUPTED;
			goto put_page;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_page;
	f2fs_put_dnode(&dn);

	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
		err = -ENOENT;
		goto put_page;
	}
	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE))) {
		err = -EFSCORRUPTED;
		goto put_page;
	}
got_it:
	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	/*
	 * don't cache encrypted data into meta inode until previous dirty
	 * data were written back, to avoid racing between GC and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
					dn.data_blkaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto put_page;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_encrypted_page;
	f2fs_put_page(fio.encrypted_page, 0);
	f2fs_put_page(page, 1);

	f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
	f2fs_update_iostat(sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);

	return 0;
put_encrypted_page:
	f2fs_put_page(fio.encrypted_page, 1);
put_page:
	f2fs_put_page(page, 1);
	return err;
}
/*
 * Move data block via META_MAPPING while keeping locked data page.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
static int move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page, *mpage;
	block_t newaddr;
	int err = 0;
	bool lfs_mode = f2fs_lfs_mode(fio.sbi);
	int type = fio.sbi->am.atgc_enabled ?
				CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return -ENOMEM;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		err = -EAGAIN;
		goto out;
	}

	if (f2fs_is_pinned_file(inode)) {
		if (gc_type == FG_GC)
			f2fs_pin_file_control(inode, true);
		err = -EAGAIN;
		goto out;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		err = -ENOENT;
		goto put_out;
	}

	/*
	 * don't cache encrypted data into meta inode until previous dirty
	 * data were written back, to avoid racing between GC and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	if (err)
		goto put_out;

	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		down_write(&fio.sbi->io_order_lock);

	mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
					fio.old_blkaddr, false);
	if (!mpage) {
		err = -ENOMEM;
		goto up_out;
	}

	fio.encrypted_page = mpage;

	/* read source block in mpage */
	if (!PageUptodate(mpage)) {
		err = f2fs_submit_page_bio(&fio);
		if (err) {
			f2fs_put_page(mpage, 1);
			goto up_out;
		}

		f2fs_update_iostat(fio.sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
		f2fs_update_iostat(fio.sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);

		lock_page(mpage);
		if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
						!PageUptodate(mpage))) {
			err = -EIO;
			f2fs_put_page(mpage, 1);
			goto up_out;
		}
	}

	f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, type, NULL);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		f2fs_put_page(mpage, 1);
		goto recover_block;
	}

	/* write target block */
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
	memcpy(page_address(fio.encrypted_page),
				page_address(mpage), PAGE_SIZE);
	f2fs_put_page(mpage, 1);
	invalidate_mapping_pages(META_MAPPING(fio.sbi),
				fio.old_blkaddr, fio.old_blkaddr);

	set_page_dirty(fio.encrypted_page);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);
	ClearPageError(page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);
	if (fio.retry) {
		err = -EAGAIN;
		if (PageWriteback(fio.encrypted_page))
			end_page_writeback(fio.encrypted_page);
		goto put_page_out;
	}

	f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
							true, true, true);
up_out:
	if (lfs_mode)
		up_write(&fio.sbi->io_order_lock);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}
static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
							unsigned int segno, int off)
{
	struct page *page;
	int err = 0;

	page = f2fs_get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return PTR_ERR(page);

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		err = -EAGAIN;
		goto out;
	}
	if (f2fs_is_pinned_file(inode)) {
		if (gc_type == FG_GC)
			f2fs_pin_file_control(inode, true);
		err = -EAGAIN;
		goto out;
	}

	if (gc_type == BG_GC) {
		if (PageWriteback(page)) {
			err = -EAGAIN;
			goto out;
		}
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);

retry:
		f2fs_wait_on_page_writeback(page, DATA, true, true);

		set_page_dirty(page);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}

		set_cold_data(page);

		err = f2fs_do_write_data_page(&fio);
		if (err) {
			clear_cold_data(page);
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				goto retry;
			}
			if (is_dirty)
				set_page_dirty(page);
		}
	}
out:
	f2fs_put_page(page, 1);
	return err;
}
/*
 * This function tries to get parent node of victim data block, and identifies
 * data block validity. If the block is valid, copy that with cold status and
 * modify parent node.
 * If the parent node is not valid or the data block address is different,
 * the victim data block is ignored.
 */
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
		bool force_migrate)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/*
		 * stop BG_GC if there is not enough free sections.
		 * Or, stop GC if the segment becomes fully valid caused by
		 * race condition along with SSR block allocation.
		 */
		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
			(!force_migrate && get_valid_blocks(sbi, segno, true) ==
							BLKS_PER_SEC(sbi)))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			f2fs_ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode) ||
					special_file(inode->i_mode)) {
				set_sbi_flag(sbi, SBI_NEED_FSCK);
				continue;
			}

			if (!down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				iput(inode);
				sbi->skipped_gc_rwsem++;
				continue;
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
								ofs_in_node;

			if (f2fs_post_read_required(inode)) {
				int err = ra_data_block(inode, start_bidx);

				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				if (err) {
					iput(inode);
					continue;
				}
				add_gc_inode(gc_list, inode);
				continue;
			}

			data_page = f2fs_get_read_data_page(inode,
						start_bidx, REQ_RAHEAD, true);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;
			int err;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->i_gc_rwsem[READ])) {
					sbi->skipped_gc_rwsem++;
					continue;
				}
				if (!down_write_trylock(
						&fi->i_gc_rwsem[WRITE])) {
					sbi->skipped_gc_rwsem++;
					up_write(&fi->i_gc_rwsem[READ]);
					continue;
				}
				locked = true;

				/* wait for all inflight aio data */
				inode_dio_wait(inode);
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_post_read_required(inode))
				err = move_data_block(inode, start_bidx,
							gc_type, segno, off);
			else
				err = move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (!err && (gc_type == FG_GC ||
					f2fs_post_read_required(inode)))
				submitted++;

			if (locked) {
				up_write(&fi->i_gc_rwsem[WRITE]);
				up_write(&fi->i_gc_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;

	return submitted;
}
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS, 0);
	up_write(&sit_i->sentry_lock);
	return ret;
}
static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type,
				bool force_migrate)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int seg_freed = 0, migrated = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;
	int submitted = 0;

	if (__is_large_section(sbi))
		end_segno = rounddown(end_segno, sbi->segs_per_sec);

	/*
	 * zone-capacity can be less than zone-size in zoned devices,
	 * resulting in less than expected usable segments in the zone,
	 * calculate the end segno in the zone which can be garbage collected
	 */
	if (f2fs_sb_has_blkzoned(sbi))
		end_segno -= sbi->segs_per_sec -
					f2fs_usable_segs_in_sec(sbi, segno);

	sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);

	/* readahead multiple SSA blocks that have contiguous addresses */
	if (__is_large_section(sbi))
		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					end_segno - segno, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = f2fs_get_sum_page(sbi, segno++);
		if (IS_ERR(sum_page)) {
			int err = PTR_ERR(sum_page);

			end_segno = segno - 1;
			for (segno = start_segno; segno < end_segno; segno++) {
				sum_page = find_get_page(META_MAPPING(sbi),
						GET_SUM_BLOCK(sbi, segno));
				f2fs_put_page(sum_page, 0);
				f2fs_put_page(sum_page, 0);
			}
			return err;
		}
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0)
			goto freed;
		if (gc_type == BG_GC && __is_large_section(sbi) &&
				migrated >= sbi->migration_granularity)
			goto skip;
		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
			goto skip;

		sum = page_address(sum_page);
		if (type != GET_SUM_TYPE((&sum->footer))) {
			f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
				 segno, type, GET_SUM_TYPE((&sum->footer)));
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_stop_checkpoint(sbi, false);
			goto skip;
		}

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			submitted += gc_node_segment(sbi, sum->entries, segno,
								gc_type);
		else
			submitted += gc_data_segment(sbi, sum->entries, gc_list,
							segno, gc_type,
							force_migrate);

		stat_inc_seg_count(sbi, type, gc_type);
		migrated++;

freed:
		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;

		if (__is_large_section(sbi))
			sbi->next_victim_seg[gc_type] =
				(segno + 1 < end_segno) ? segno + 1 : NULL_SEGNO;
skip:
		f2fs_put_page(sum_page, 0);
	}

	if (submitted)
		f2fs_submit_merged_write(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA);

	blk_finish_plug(&plug);

	stat_inc_call_count(sbi->stat_info);

	return seg_freed;
}
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
			bool background, bool force, unsigned int segno)
{
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0, seg_freed = 0, total_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	unsigned int init_segno = segno;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
	};
	unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
	unsigned long long first_skipped;
	unsigned int skipped_round = 0, round = 0;

	trace_f2fs_gc_begin(sbi->sb, sync, background,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
	sbi->skipped_gc_rwsem = 0;
	first_skipped = last_skipped;
gc_more:
	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
		/*
		 * For example, if there are many prefree_segments below given
		 * threshold, we can make them free by checkpoint. Then, we
		 * secure free segments which don't need FG_GC any more.
		 */
		if (prefree_segments(sbi) &&
				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
			ret = f2fs_write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
		if (has_not_enough_free_secs(sbi, 0, 0))
			gc_type = FG_GC;
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
	if (gc_type == BG_GC && !background) {
		ret = -EINVAL;
		goto stop;
	}
	ret = __get_victim(sbi, &segno, gc_type);
	if (ret)
		goto stop;

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type, force);
	if (gc_type == FG_GC &&
		seg_freed == f2fs_usable_segs_in_sec(sbi, segno))
		sec_freed++;
	total_freed += seg_freed;

	if (gc_type == FG_GC) {
		if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
						sbi->skipped_gc_rwsem)
			skipped_round++;
		last_skipped = sbi->skipped_atomic_files[FG_GC];
		round++;
	}

	if (gc_type == FG_GC && seg_freed)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (sync)
		goto stop;

	if (!has_not_enough_free_secs(sbi, sec_freed, 0))
		goto stop;

	if (skipped_round <= MAX_SKIP_GC_COUNT || skipped_round * 2 < round) {

		/* Write checkpoint to reclaim prefree segments */
		if (free_sections(sbi) < NR_CURSEG_PERSIST_TYPE &&
				prefree_segments(sbi) &&
				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
			ret = f2fs_write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
		segno = NULL_SEGNO;
		goto gc_more;
	}
	if (first_skipped < last_skipped &&
			(last_skipped - first_skipped) >
					sbi->skipped_gc_rwsem) {
		f2fs_drop_inmem_pages_all(sbi, true);
		segno = NULL_SEGNO;
		goto gc_more;
	}
	if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED))
		ret = f2fs_write_checkpoint(sbi, &cpc);
stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	up_write(&sbi->gc_lock);

	put_gc_inode(&gc_list);

	if (sync && !ret)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}
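/*
 * A note on the retry loop above (a sketch of the visible logic, not an
 * addition to it): each FG_GC round that makes no progress because atomic
 * files or i_gc_rwsem contention skipped blocks bumps skipped_round.
 * Retrying via "goto gc_more" continues while skipped_round stays within
 * MAX_SKIP_GC_COUNT or fewer than half of all rounds were skipped; past
 * that, the fallbacks are dropping in-memory atomic pages or writing a
 * checkpoint so prefree segments can be reclaimed.
 */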
int __init f2fs_create_garbage_collection_cache(void)
{
	victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
					sizeof(struct victim_entry));
	if (!victim_entry_slab)
		return -ENOMEM;
	return 0;
}
void f2fs_destroy_garbage_collection_cache(void)
{
	kmem_cache_destroy(victim_entry_slab);
}
static void init_atgc_management(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;

	if (test_opt(sbi, ATGC) &&
		SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
		am->atgc_enabled = true;

	am->root = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&am->victim_list);
	am->victim_count = 0;

	am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
	am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
	am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
	am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
}
void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;

	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* give warm/cold data area from slower device */
	if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;

	init_atgc_management(sbi);
}
static int free_segment_range(struct f2fs_sb_info *sbi,
				unsigned int secs, bool gc_only)
{
	unsigned int segno, next_inuse, start, end;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	int gc_mode, gc_type;
	int err = 0;
	int type;

	/* Force block allocation for GC */
	MAIN_SECS(sbi) -= secs;
	start = MAIN_SECS(sbi) * sbi->segs_per_sec;
	end = MAIN_SEGS(sbi) - 1;

	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
	for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
		if (SIT_I(sbi)->last_victim[gc_mode] >= start)
			SIT_I(sbi)->last_victim[gc_mode] = 0;

	for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
		if (sbi->next_victim_seg[gc_type] >= start)
			sbi->next_victim_seg[gc_type] = NULL_SEGNO;
	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);

	/* Move out cursegs from the target range */
	for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++)
		f2fs_allocate_segment_for_resize(sbi, type, start, end);

	/* do GC to move out valid blocks in the range */
	for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
		struct gc_inode_list gc_list = {
			.ilist = LIST_HEAD_INIT(gc_list.ilist),
			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
		};

		do_garbage_collect(sbi, segno, &gc_list, FG_GC, true);
		put_gc_inode(&gc_list);

		if (!gc_only && get_valid_blocks(sbi, segno, true)) {
			err = -EAGAIN;
			goto out;
		}
		if (fatal_signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
	if (gc_only)
		goto out;

	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err)
		goto out;

	next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
	if (next_inuse <= end) {
		f2fs_err(sbi, "segno %u should be free but still inuse!",
			 next_inuse);
		f2fs_bug_on(sbi, 1);
	}
out:
	MAIN_SECS(sbi) += secs;
	return err;
}
static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
{
	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
	int section_count;
	int segment_count;
	int segment_count_main;
	long long block_count;
	int segs = secs * sbi->segs_per_sec;

	down_write(&sbi->sb_lock);

	section_count = le32_to_cpu(raw_sb->section_count);
	segment_count = le32_to_cpu(raw_sb->segment_count);
	segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
	block_count = le64_to_cpu(raw_sb->block_count);

	raw_sb->section_count = cpu_to_le32(section_count + secs);
	raw_sb->segment_count = cpu_to_le32(segment_count + segs);
	raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
	raw_sb->block_count = cpu_to_le64(block_count +
					(long long)segs * sbi->blocks_per_seg);
	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		int dev_segs =
			le32_to_cpu(raw_sb->devs[last_dev].total_segments);

		raw_sb->devs[last_dev].total_segments =
						cpu_to_le32(dev_segs + segs);
	}

	up_write(&sbi->sb_lock);
}
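/*
 * Unit arithmetic for the resize paths (illustrative): one section is
 * segs_per_sec segments and one segment is blocks_per_seg blocks, so a
 * delta of "secs" sections translates to
 *   segs = secs * segs_per_sec
 *   blks = (long long)segs * blocks_per_seg
 * e.g. with the common 1 segment per section and 512 blocks (4KB each)
 * per segment, one section is 2MB.  update_sb_metadata() applies the delta
 * to the on-disk super block; update_fs_metadata() below mirrors it in the
 * in-core counters.
 */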
static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
{
	int segs = secs * sbi->segs_per_sec;
	long long blks = (long long)segs * sbi->blocks_per_seg;
	long long user_block_count =
			le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);

	SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
	MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
	MAIN_SECS(sbi) += secs;
	FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
	FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
	F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;

		FDEV(last_dev).total_segments =
				(int)FDEV(last_dev).total_segments + segs;
		FDEV(last_dev).end_blk =
				(long long)FDEV(last_dev).end_blk + blks;
#ifdef CONFIG_BLK_DEV_ZONED
		FDEV(last_dev).nr_blkz = (int)FDEV(last_dev).nr_blkz +
					(int)(blks >> sbi->log_blocks_per_blkz);
#endif
	}
}
int f2fs_resize_fs(struct file *filp, __u64 block_count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	__u64 old_block_count, shrunk_blocks;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	unsigned int secs;
	int err = 0;
	__u32 rem;

	old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
	if (block_count > old_block_count)
		return -EINVAL;

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		__u64 last_segs = FDEV(last_dev).total_segments;

		if (block_count + last_segs * sbi->blocks_per_seg <=
								old_block_count)
			return -EINVAL;
	}

	/* new fs size should align to section size */
	div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
	if (rem)
		return -EINVAL;

	if (block_count == old_block_count)
		return 0;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
		f2fs_err(sbi, "Should run fsck to repair first.");
		return -EFSCORRUPTED;
	}

	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		f2fs_err(sbi, "Checkpoint should be enabled.");
		return -EINVAL;
	}

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	shrunk_blocks = old_block_count - block_count;
	secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));

	/* stop other GC */
	if (!down_write_trylock(&sbi->gc_lock)) {
		err = -EAGAIN;
		goto out_drop_write;
	}

	/* stop CP to protect MAIN_SEC in free_segment_range */
	f2fs_lock_op(sbi);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	spin_unlock(&sbi->stat_lock);

	if (err)
		goto out_unlock;

	err = free_segment_range(sbi, secs, true);

out_unlock:
	f2fs_unlock_op(sbi);
	up_write(&sbi->gc_lock);
out_drop_write:
	mnt_drop_write_file(filp);
	if (err)
		return err;

	freeze_super(sbi->sb);

	if (f2fs_readonly(sbi->sb)) {
		thaw_super(sbi->sb);
		return -EROFS;
	}

	down_write(&sbi->gc_lock);
	mutex_lock(&sbi->cp_mutex);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	else
		sbi->user_block_count -= shrunk_blocks;
	spin_unlock(&sbi->stat_lock);
	if (err)
		goto out_err;

	set_sbi_flag(sbi, SBI_IS_RESIZEFS);
	err = free_segment_range(sbi, secs, false);
	if (err)
		goto recover_out;

	update_sb_metadata(sbi, -secs);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		update_sb_metadata(sbi, secs);
		goto recover_out;
	}

	update_fs_metadata(sbi, -secs);
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err) {
		update_fs_metadata(sbi, secs);
		update_sb_metadata(sbi, secs);
		f2fs_commit_super(sbi, false);
	}
recover_out:
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");

		spin_lock(&sbi->stat_lock);
		sbi->user_block_count += shrunk_blocks;
		spin_unlock(&sbi->stat_lock);
	}
out_err:
	mutex_unlock(&sbi->cp_mutex);
	up_write(&sbi->gc_lock);
	thaw_super(sbi->sb);
	return err;
}