// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include "iostat.h"
#include <trace/events/f2fs.h>
static struct kmem_cache *victim_entry_slab;

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len);
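
/*
 * Background GC kernel thread: sleep for an adaptive interval, then run
 * one round of background (or GC_MERGE-d foreground) garbage collection
 * when the filesystem is idle and enough invalid blocks have accumulated.
 */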
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
	unsigned int wait_ms;
	struct f2fs_gc_control gc_control = {
		.victim_segno = NULL_SEGNO,
		.should_migrate_blocks = false,
		.err_gc_skipped = false };

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		bool sync_mode, foreground = false;

		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				waitqueue_active(fggc_wq) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
			foreground = true;

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = 0;

		if (try_to_freeze()) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false,
					STOP_CP_REASON_FAULT_INJECT);
		}

		if (!sb_start_write_trylock(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs frequently.
		 * Because it is possible that some segments can be
		 * invalidated soon after by user update or deletion.
		 * So, I'd like to wait some time to collect dirty segments.
		 */
		if (sbi->gc_mode == GC_URGENT_HIGH ||
				sbi->gc_mode == GC_URGENT_MID) {
			wait_ms = gc_th->urgent_sleep_time;
			f2fs_down_write(&sbi->gc_lock);
			goto do_gc;
		}

		if (foreground) {
			f2fs_down_write(&sbi->gc_lock);
			goto do_gc;
		} else if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
			stat_other_skip_bggc_count(sbi);
			goto next;
		}

		if (!is_idle(sbi, GC_TIME)) {
			increase_sleep_time(gc_th, &wait_ms);
			f2fs_up_write(&sbi->gc_lock);
			stat_io_skip_bggc_count(sbi);
			goto next;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
		stat_inc_bggc_count(sbi->stat_info);

		sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;

		/* foreground GC was triggered via f2fs_balance_fs() */
		if (foreground)
			sync_mode = false;

		gc_control.init_gc_type = sync_mode ? FG_GC : BG_GC;
		gc_control.no_bg_gc = foreground;
		gc_control.nr_free_secs = foreground ? 1 : 0;

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, &gc_control)) {
			/* don't bother wait_ms by foreground gc */
			if (!foreground)
				wait_ms = gc_th->no_gc_sleep_time;
		}

		if (foreground)
			wake_up_all(&gc_th->fggc_wq);

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi, true);
next:
		if (sbi->gc_mode == GC_URGENT_HIGH) {
			spin_lock(&sbi->gc_urgent_high_lock);
			if (sbi->gc_urgent_high_remaining) {
				sbi->gc_urgent_high_remaining--;
				if (!sbi->gc_urgent_high_remaining)
					sbi->gc_mode = GC_NORMAL;
			}
			spin_unlock(&sbi->gc_urgent_high_lock);
		}
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_wake = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	init_waitqueue_head(&sbi->gc_thread->fggc_wq);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}
void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	wake_up_all(&gc_th->fggc_wq);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}
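
/* Map the configured gc_mode and the requested GC type to a selection policy. */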
static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode;

	if (gc_type == BG_GC) {
		if (sbi->am.atgc_enabled)
			gc_mode = GC_AT;
		else
			gc_mode = GC_CB;
	} else {
		gc_mode = GC_GREEDY;
	}

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT_HIGH:
		gc_mode = GC_GREEDY;
		break;
	case GC_IDLE_AT:
		gc_mode = GC_AT;
		break;
	}

	return gc_mode;
}
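
/*
 * Initialize the victim selection policy: choose the cost metric, the
 * dirty bitmap to scan, the scan granularity (segment vs. section) and
 * the position where scanning starts.
 */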
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else if (p->alloc_mode == AT_SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->ofs_unit = sbi->segs_per_sec;
		if (__is_large_section(sbi)) {
			p->dirty_bitmap = dirty_i->dirty_secmap;
			p->max_search = count_bits(p->dirty_bitmap,
						0, MAIN_SECS(sbi));
		} else {
			p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
			p->max_search = dirty_i->nr_dirty[DIRTY];
		}
	}

	/*
	 * adjust candidates range, should select all dirty segments for
	 * foreground GC and urgent GC cases.
	 */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT_HIGH) &&
			(p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select beginning hot/small space first in no_heap mode */
	if (f2fs_need_rand_seg(sbi))
		p->offset = prandom_u32_max(MAIN_SECS(sbi) * sbi->segs_per_sec);
	else if (test_opt(sbi, NOHEAP) &&
		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}
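
/* Worst-case cost for the chosen policy, used to seed p->min_cost. */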
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	else if (p->alloc_mode == AT_SSR)
		return UINT_MAX;

	/* LFS */
	if (p->gc_mode == GC_GREEDY)
		return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else if (p->gc_mode == GC_AT)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}
static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can select victim segments
	 * selected by background GC before.
	 * Those segments are guaranteed to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}
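
/*
 * Cost-benefit victim cost of a section: u is the utilization in percent
 * and age is normalized to 0..100 over [min_mtime, max_mtime]. The
 * benefit is 100 * (100 - u) * age / (100 + u); a smaller return value
 * (UINT_MAX - benefit) marks a better victim.
 */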
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;
	unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno);

	for (i = 0; i < usable_segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, usable_segs_per_sec);
	vblocks = div_u64(vblocks, usable_segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}
static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else if (p->gc_mode == GC_CB)
		return get_cb_cost(sbi, segno);

	f2fs_bug_on(sbi, 1);
	return 0;
}
static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}
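
/*
 * Debug-only check (CONFIG_F2FS_CHECK_FS): verify that the ATGC victim
 * rbtree is ordered by increasing mtime.
 */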
static bool f2fs_check_victim_tree(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct rb_node *cur = rb_first_cached(root), *next;
	struct victim_entry *cur_ve, *next_ve;

	while (cur) {
		next = rb_next(cur);
		if (!next)
			return true;

		cur_ve = rb_entry(cur, struct victim_entry, rb_node);
		next_ve = rb_entry(next, struct victim_entry, rb_node);

		if (cur_ve->mtime > next_ve->mtime) {
			f2fs_info(sbi, "broken victim_rbtree, "
				"cur_mtime(%llu) next_mtime(%llu)",
				cur_ve->mtime, next_ve->mtime);
			return false;
		}
		cur = next;
	}
#endif
	return true;
}
static struct victim_entry *__lookup_victim_entry(struct f2fs_sb_info *sbi,
					unsigned long long mtime)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node *node = am->root.rb_root.rb_node;
	struct victim_entry *ve = NULL;

	while (node) {
		ve = rb_entry(node, struct victim_entry, rb_node);

		if (mtime < ve->mtime)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return ve;
}
static struct victim_entry *__create_victim_entry(struct f2fs_sb_info *sbi,
		unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;

	ve = f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS, true, NULL);

	ve->mtime = mtime;
	ve->segno = segno;

	list_add_tail(&ve->list, &am->victim_list);
	am->victim_count++;

	return ve;
}
static void __insert_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct victim_entry *ve;
	bool left_most = true;

	/* look up rb tree to find parent node */
	while (*p) {
		parent = *p;
		ve = rb_entry(parent, struct victim_entry, rb_node);

		if (mtime < ve->mtime) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			left_most = false;
		}
	}

	ve = __create_victim_entry(sbi, mtime, segno);

	rb_link_node(&ve->rb_node, parent, p);
	rb_insert_color_cached(&ve->rb_node, root, left_most);
}
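
/*
 * Compute the average mtime of all segments in the section and, if the
 * section is old enough w.r.t. p->age_threshold, add it to the ATGC
 * candidate tree.
 */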
static void add_victim_entry(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int i;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (p->gc_mode == GC_AT &&
			get_valid_blocks(sbi, segno, true) == 0)
			return;
	}

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	mtime = div_u64(mtime, sbi->segs_per_sec);

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (mtime < sit_i->dirty_min_mtime)
		sit_i->dirty_min_mtime = mtime;
	if (mtime > sit_i->dirty_max_mtime)
		sit_i->dirty_max_mtime = mtime;

	/* don't choose young section as candidate */
	if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
		return;

	__insert_victim_entry(sbi, mtime, segno);
}
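
/*
 * Scan candidates from the oldest mtime and pick the victim with the
 * best weighted sum of age (age_weight percent) and invalid-block ratio
 * (100 - age_weight percent), examining at most dirty_threshold entries.
 */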
static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node *node;
	struct victim_entry *ve;
	unsigned long long total_time;
	unsigned long long age, u, accu;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int sec_blocks = CAP_BLKS_PER_SEC(sbi);
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int age_weight = am->age_weight;
	unsigned int cost;
	unsigned int iter = 0;

	if (max_mtime < min_mtime)
		return;

	max_mtime += 1;
	total_time = max_mtime - min_mtime;

	accu = div64_u64(ULLONG_MAX, total_time);
	accu = min_t(unsigned long long, div_u64(accu, 100),
					DEFAULT_ACCURACY_CLASS);

	node = rb_first_cached(root);
next:
	ve = rb_entry_safe(node, struct victim_entry, rb_node);
	if (!ve)
		return;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip;

	/* age = 10000 * x% * 60 */
	age = div64_u64(accu * (max_mtime - ve->mtime), total_time) *
								age_weight;

	vblocks = get_valid_blocks(sbi, ve->segno, true);
	f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);

	/* u = 10000 * x% * 40 */
	u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) *
							(100 - age_weight);

	f2fs_bug_on(sbi, age + u >= UINT_MAX);

	cost = UINT_MAX - (age + u);
	iter++;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip:
	if (iter < dirty_threshold) {
		node = rb_next(node);
		goto next;
	}
}
/*
 * select candidates around source section in range of
 * [target - dirty_threshold, target + dirty_threshold]
 */
static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;
	unsigned long long age;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int seg_blocks = sbi->blocks_per_seg;
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int cost, iter;
	int stage = 0;

	if (max_mtime < min_mtime)
		return;

	max_mtime += 1;
next_stage:
	iter = 0;
	ve = __lookup_victim_entry(sbi, p->age);
next_node:
	if (!ve) {
		if (stage == 0)
			goto skip_stage;
		return;
	}

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip_node;

	age = max_mtime - ve->mtime;

	vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
	f2fs_bug_on(sbi, !vblocks);

	/* rare case */
	if (vblocks == seg_blocks)
		goto skip_node;

	iter++;

	age = max_mtime - abs(p->age - age);
	cost = UINT_MAX - vblocks;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip_node:
	if (iter < dirty_threshold) {
		ve = rb_entry(stage == 0 ? rb_prev(&ve->rb_node) :
					rb_next(&ve->rb_node),
					struct victim_entry, rb_node);
		goto next_node;
	}
skip_stage:
	if (stage < 1) {
		stage++;
		goto next_stage;
	}
}
static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	f2fs_bug_on(sbi, !f2fs_check_victim_tree(sbi, &sbi->am.root));

	if (p->gc_mode == GC_AT)
		atgc_lookup_victim(sbi, p);
	else if (p->alloc_mode == AT_SSR)
		atssr_lookup_victim(sbi, p);
	else
		f2fs_bug_on(sbi, 1);
}
static void release_victim_entry(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve, *tmp;

	list_for_each_entry_safe(ve, tmp, &am->victim_list, list) {
		list_del(&ve->list);
		kmem_cache_free(victim_entry_slab, ve);
		am->victim_count--;
	}

	am->root = RB_ROOT_CACHED;

	f2fs_bug_on(sbi, am->victim_count);
	f2fs_bug_on(sbi, !list_empty(&am->victim_list));
}
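
/*
 * Pinned-section tracking: sections holding blocks of pinned files are
 * recorded here so that foreground GC can skip them.
 */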
static bool f2fs_pin_section(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	if (!dirty_i->enable_pin_section)
		return false;
	if (!test_and_set_bit(secno, dirty_i->pinned_secmap))
		dirty_i->pinned_secmap_cnt++;
	return true;
}
static bool f2fs_pinned_section_exists(struct dirty_seglist_info *dirty_i)
{
	return dirty_i->pinned_secmap_cnt;
}

static bool f2fs_section_is_pinned(struct dirty_seglist_info *dirty_i,
						unsigned int secno)
{
	return dirty_i->enable_pin_section &&
		f2fs_pinned_section_exists(dirty_i) &&
		test_bit(secno, dirty_i->pinned_secmap);
}
static void f2fs_unpin_all_sections(struct f2fs_sb_info *sbi, bool enable)
{
	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));

	if (f2fs_pinned_section_exists(DIRTY_I(sbi))) {
		memset(DIRTY_I(sbi)->pinned_secmap, 0, bitmap_size);
		DIRTY_I(sbi)->pinned_secmap_cnt = 0;
	}
	DIRTY_I(sbi)->enable_pin_section = enable;
}
static int f2fs_gc_pinned_control(struct inode *inode, int gc_type,
							unsigned int segno)
{
	if (!f2fs_is_pinned_file(inode))
		return 0;
	if (gc_type != FG_GC)
		return -EBUSY;
	if (!f2fs_pin_section(F2FS_I_SB(inode), segno))
		f2fs_pin_file_control(inode, true);
	return -EAGAIN;
}
/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
			unsigned int *result, int gc_type, int type,
			char alloc_mode, unsigned long long age)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment;
	unsigned int nsearched;
	bool is_atgc;
	int ret = 0;

	mutex_lock(&dirty_i->seglist_lock);
	last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec;

	p.alloc_mode = alloc_mode;
	p.age = age;
	p.age_threshold = sbi->am.age_threshold;

retry:
	select_policy(sbi, gc_type, type, &p);
	p.min_segno = NULL_SEGNO;
	p.oldest_age = 0;
	p.min_cost = get_max_cost(sbi, &p);

	is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR);
	nsearched = 0;

	if (is_atgc)
		SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;

	if (*result != NULL_SEGNO) {
		if (!get_valid_blocks(sbi, *result, false)) {
			ret = -ENODATA;
			goto out;
		}

		if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			ret = -EBUSY;
		else
			p.min_segno = *result;
		goto out;
	}

	ret = -ENODATA;
	if (p.max_search == 0)
		goto out;

	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[BG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
			goto got_result;
		}
		if (gc_type == FG_GC &&
				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[FG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
			goto got_result;
		}
	}

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost, *dirty_bitmap;
		unsigned int unit_no, segno;

		dirty_bitmap = p.dirty_bitmap;
		unit_no = find_next_bit(dirty_bitmap,
				last_segment / p.ofs_unit,
				p.offset / p.ofs_unit);
		segno = unit_no * p.ofs_unit;
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		nsearched++;

#ifdef CONFIG_F2FS_CHECK_FS
		/*
		 * skip selecting the invalid segno (that is failed due to block
		 * validity check failure during GC) to avoid endless GC loop in
		 * such cases.
		 */
		if (test_bit(segno, sm->invalid_segmap))
			goto next;
#endif

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;

		/* Don't touch checkpointed data */
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
			if (p.alloc_mode == LFS) {
				/*
				 * LFS is set to find source section during GC.
				 * The victim should have no checkpointed data.
				 */
				if (get_ckpt_valid_blocks(sbi, segno, true))
					goto next;
			} else {
				/*
				 * SSR | AT_SSR are set to find target segment
				 * for writes which can be full by checkpointed
				 * and newly written blocks.
				 */
				if (!f2fs_segment_has_free_slot(sbi, segno))
					goto next;
			}
		}

		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		if (gc_type == FG_GC && f2fs_section_is_pinned(dirty_i, secno))
			goto next;

		if (is_atgc) {
			add_victim_entry(sbi, &p, segno);
			goto next;
		}

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] =
					last_victim + p.ofs_unit;
			else
				sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
			sm->last_victim[p.gc_mode] %=
				(MAIN_SECS(sbi) * sbi->segs_per_sec);
			break;
		}
	}

	/* get victim for GC_AT/AT_SSR */
	if (is_atgc) {
		lookup_victim_by_age(sbi, &p);
		release_victim_entry(sbi);
	}

	if (is_atgc && p.min_segno == NULL_SEGNO &&
			sm->elapsed_time < p.age_threshold) {
		p.age_threshold = 0;
		goto retry;
	}

	if (p.min_segno != NULL_SEGNO) {
got_it:
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
got_result:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		ret = 0;
	}
out:
	if (p.min_segno != NULL_SEGNO)
		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	mutex_unlock(&dirty_i->seglist_lock);

	return ret;
}
static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};
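
/*
 * gc_inode_list helpers: inodes referenced while collecting data blocks
 * are kept in a radix tree plus list so that each inode is held only
 * once and all references are dropped together in put_gc_inode().
 */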
static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}
static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab,
					GFP_NOFS, true, NULL);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}
static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(f2fs_inode_entry_slab, ie);
	}
}
static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}
/*
 * This function compares the node address recorded in the summary with
 * that in NAT. On validity, copy that node with cold status, otherwise
 * (invalid node) ignore it.
 */
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;
		int err;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = f2fs_get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during f2fs_get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (f2fs_get_node_info(sbi, nid, &ni, false)) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		err = f2fs_move_node_page(node_page, gc_type);
		if (!err && gc_type == FG_GC)
			submitted++;
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return submitted;
}
/*
 * Calculate start block index indicating the given node offset.
 * Be careful, caller should give this node offset only indicating direct node
 * blocks. If any node offsets, which point the other types of node blocks such
 * as indirect or double indirect node blocks, are given, it must be a caller's
 * mistake or BUG.
 */
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
}
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node, max_addrs, base;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	if (f2fs_get_node_info(sbi, nid, dni, false)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (sum->version != dni->version) {
		f2fs_warn(sbi, "%s: valid data with mismatched node version.",
			  __func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	if (f2fs_check_nid_range(sbi, dni->ino)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (IS_INODE(node_page)) {
		base = offset_in_addr(F2FS_INODE(node_page));
		max_addrs = DEF_ADDRS_PER_INODE;
	} else {
		base = 0;
		max_addrs = DEF_ADDRS_PER_BLOCK;
	}

	if (base + ofs_in_node >= max_addrs) {
		f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u",
			base, ofs_in_node, max_addrs, dni->ino, dni->nid);
		f2fs_put_page(node_page, 1);
		return false;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr) {
#ifdef CONFIG_F2FS_CHECK_FS
		unsigned int segno = GET_SEGNO(sbi, blkaddr);
		unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

		if (unlikely(check_valid_map(sbi, segno, offset))) {
			if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
				f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
					 blkaddr, source_blkaddr, segno);
				set_sbi_flag(sbi, SBI_NEED_FSCK);
			}
		}
#endif
		return false;
	}
	return true;
}
static int ra_data_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0, };
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page)
		return -ENOMEM;

	if (f2fs_lookup_read_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ))) {
			err = -EFSCORRUPTED;
			f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
			goto put_page;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_page;
	f2fs_put_dnode(&dn);

	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
		err = -ENOENT;
		goto put_page;
	}
	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE))) {
		err = -EFSCORRUPTED;
		f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
		goto put_page;
	}
got_it:
	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	/*
	 * don't cache encrypted data into meta inode until previous dirty
	 * data has been written back, to avoid racing between GC and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
					dn.data_blkaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto put_page;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_encrypted_page;
	f2fs_put_page(fio.encrypted_page, 0);
	f2fs_put_page(page, 1);

	f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
	f2fs_update_iostat(sbi, NULL, FS_GDATA_READ_IO, F2FS_BLKSIZE);

	return 0;
put_encrypted_page:
	f2fs_put_page(fio.encrypted_page, 1);
put_page:
	f2fs_put_page(page, 1);
	return err;
}
/*
 * Move data block via META_MAPPING while keeping locked data page.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
static int move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page, *mpage;
	block_t newaddr;
	int err = 0;
	bool lfs_mode = f2fs_lfs_mode(fio.sbi);
	int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) &&
				(fio.sbi->gc_mode != GC_URGENT_HIGH) ?
				CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return -ENOMEM;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	err = f2fs_gc_pinned_control(inode, gc_type, segno);
	if (err)
		goto out;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		err = -ENOENT;
		goto put_out;
	}

	/*
	 * don't cache encrypted data into meta inode until previous dirty
	 * data has been written back, to avoid racing between GC and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
	if (err)
		goto put_out;

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		f2fs_down_write(&fio.sbi->io_order_lock);

	mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
					fio.old_blkaddr, false);
	if (!mpage) {
		err = -ENOMEM;
		goto up_out;
	}

	fio.encrypted_page = mpage;

	/* read source block in mpage */
	if (!PageUptodate(mpage)) {
		err = f2fs_submit_page_bio(&fio);
		if (err) {
			f2fs_put_page(mpage, 1);
			goto up_out;
		}

		f2fs_update_iostat(fio.sbi, inode, FS_DATA_READ_IO,
							F2FS_BLKSIZE);
		f2fs_update_iostat(fio.sbi, NULL, FS_GDATA_READ_IO,
							F2FS_BLKSIZE);

		lock_page(mpage);
		if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
						!PageUptodate(mpage))) {
			err = -EIO;
			f2fs_put_page(mpage, 1);
			goto up_out;
		}
	}

	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* allocate block address */
	f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
				&sum, type, NULL);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		f2fs_put_page(mpage, 1);
		goto recover_block;
	}

	/* write target block */
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
	memcpy(page_address(fio.encrypted_page),
				page_address(mpage), PAGE_SIZE);
	f2fs_put_page(mpage, 1);
	invalidate_mapping_pages(META_MAPPING(fio.sbi),
				fio.old_blkaddr, fio.old_blkaddr);
	f2fs_invalidate_compress_page(fio.sbi, fio.old_blkaddr);

	set_page_dirty(fio.encrypted_page);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);
	ClearPageError(page);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);
	if (fio.retry) {
		err = -EAGAIN;
		if (PageWriteback(fio.encrypted_page))
			end_page_writeback(fio.encrypted_page);
		goto put_page_out;
	}

	f2fs_update_iostat(fio.sbi, NULL, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
							true, true, true);
up_out:
	if (lfs_mode)
		f2fs_up_write(&fio.sbi->io_order_lock);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}
static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
						unsigned int segno, int off)
{
	struct page *page;
	int err = 0;

	page = f2fs_get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return PTR_ERR(page);

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	err = f2fs_gc_pinned_control(inode, gc_type, segno);
	if (err)
		goto out;

	if (gc_type == BG_GC) {
		if (PageWriteback(page)) {
			err = -EAGAIN;
			goto out;
		}
		set_page_dirty(page);
		set_page_private_gcing(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);

retry:
		f2fs_wait_on_page_writeback(page, DATA, true, true);

		set_page_dirty(page);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}

		set_page_private_gcing(page);

		err = f2fs_do_write_data_page(&fio);
		if (err) {
			clear_page_private_gcing(page);
			if (err == -ENOMEM) {
				memalloc_retry_wait(GFP_NOFS);
				goto retry;
			}
			if (is_dirty)
				set_page_dirty(page);
		}
	}
out:
	f2fs_put_page(page, 1);
	return err;
}
/*
 * This function tries to get parent node of victim data block, and identifies
 * data block validity. If the block is valid, copy that with cold status and
 * modify parent node.
 * If the parent node is not valid or the data block address is different,
 * the victim data block is ignored.
 */
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
		bool force_migrate)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/*
		 * stop BG_GC if there are not enough free sections.
		 * Or, stop GC if the segment becomes fully valid caused by
		 * race condition along with SSR block allocation.
		 */
		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
			(!force_migrate && get_valid_blocks(sbi, segno, true) ==
							CAP_BLKS_PER_SEC(sbi)))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			f2fs_ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			int err;

			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode) ||
					special_file(inode->i_mode))
				continue;

			err = f2fs_gc_pinned_control(inode, gc_type, segno);
			if (err == -EAGAIN) {
				iput(inode);
				return submitted;
			}

			if (!f2fs_down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				iput(inode);
				sbi->skipped_gc_rwsem++;
				continue;
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
								ofs_in_node;

			if (f2fs_post_read_required(inode)) {
				int err = ra_data_block(inode, start_bidx);

				f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				if (err) {
					iput(inode);
					continue;
				}
				add_gc_inode(gc_list, inode);
				continue;
			}

			data_page = f2fs_get_read_data_page(inode, start_bidx,
							REQ_RAHEAD, true, NULL);
			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;
			int err;

			if (S_ISREG(inode->i_mode)) {
				if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[READ])) {
					sbi->skipped_gc_rwsem++;
					continue;
				}
				if (!f2fs_down_write_trylock(
						&fi->i_gc_rwsem[WRITE])) {
					sbi->skipped_gc_rwsem++;
					f2fs_up_write(&fi->i_gc_rwsem[READ]);
					continue;
				}
				locked = true;

				/* wait for all inflight aio data */
				inode_dio_wait(inode);
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_post_read_required(inode))
				err = move_data_block(inode, start_bidx,
							gc_type, segno, off);
			else
				err = move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (!err && (gc_type == FG_GC ||
					f2fs_post_read_required(inode)))
				submitted++;

			if (locked) {
				f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
				f2fs_up_write(&fi->i_gc_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;

	return submitted;
}
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS, 0);
	up_write(&sit_i->sentry_lock);
	return ret;
}
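
/*
 * Collect one section: pin all summary pages first, then migrate the
 * valid node or data blocks of every victim segment. Returns the number
 * of segments that ended up completely free (FG_GC only).
 */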
static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type,
				bool force_migrate)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int seg_freed = 0, migrated = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;
	int submitted = 0;

	if (__is_large_section(sbi))
		end_segno = rounddown(end_segno, sbi->segs_per_sec);

	/*
	 * zone-capacity can be less than zone-size in zoned devices,
	 * resulting in less than expected usable segments in the zone;
	 * calculate the end segno of the zone that can be garbage collected.
	 */
	if (f2fs_sb_has_blkzoned(sbi))
		end_segno -= sbi->segs_per_sec -
					f2fs_usable_segs_in_sec(sbi, segno);

	sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);

	/* readahead multiple SSA blocks that have contiguous addresses */
	if (__is_large_section(sbi))
		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					end_segno - segno, META_SSA, true);

	/* reference all summary page */
	while (segno < end_segno) {
		sum_page = f2fs_get_sum_page(sbi, segno++);
		if (IS_ERR(sum_page)) {
			int err = PTR_ERR(sum_page);

			end_segno = segno - 1;
			for (segno = start_segno; segno < end_segno; segno++) {
				sum_page = find_get_page(META_MAPPING(sbi),
						GET_SUM_BLOCK(sbi, segno));
				f2fs_put_page(sum_page, 0);
				f2fs_put_page(sum_page, 0);
			}
			return err;
		}
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0)
			goto freed;
		if (gc_type == BG_GC && __is_large_section(sbi) &&
				migrated >= sbi->migration_granularity)
			goto skip;
		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
			goto skip;

		sum = page_address(sum_page);
		if (type != GET_SUM_TYPE((&sum->footer))) {
			f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
				 segno, type, GET_SUM_TYPE((&sum->footer)));
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_stop_checkpoint(sbi, false,
				STOP_CP_REASON_CORRUPTED_SUMMARY);
			goto skip;
		}

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			submitted += gc_node_segment(sbi, sum->entries, segno,
								gc_type);
		else
			submitted += gc_data_segment(sbi, sum->entries, gc_list,
							segno, gc_type,
							force_migrate);

		stat_inc_seg_count(sbi, type, gc_type);
		sbi->gc_reclaimed_segs[sbi->gc_mode]++;
		migrated++;

freed:
		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;

		if (__is_large_section(sbi))
			sbi->next_victim_seg[gc_type] =
				(segno + 1 < end_segno) ? segno + 1 : NULL_SEGNO;
skip:
		f2fs_put_page(sum_page, 0);
	}

	if (submitted)
		f2fs_submit_merged_write(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA);

	blk_finish_plug(&plug);

	stat_inc_call_count(sbi->stat_info);

	return seg_freed;
}
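
/*
 * Main GC entry point. The caller must hold sbi->gc_lock for write; it
 * is released here. Victims are selected and collected repeatedly until
 * enough sections are freed or no further victim can be found.
 */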
int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
{
	int gc_type = gc_control->init_gc_type;
	unsigned int segno = gc_control->victim_segno;
	int sec_freed = 0, seg_freed = 0, total_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
	};
	unsigned int skipped_round = 0, round = 0;
	unsigned int upper_secs;

	trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc,
				gc_control->nr_free_secs,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	sbi->skipped_gc_rwsem = 0;
	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
		/*
		 * For example, if there are many prefree_segments below given
		 * threshold, we can make them free by checkpoint. Then, we
		 * secure free segments which doesn't need fggc any more.
		 */
		if (prefree_segments(sbi)) {
			ret = f2fs_write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
		if (has_not_enough_free_secs(sbi, 0, 0))
			gc_type = FG_GC;
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
	if (gc_type == BG_GC && gc_control->no_bg_gc) {
		ret = -EINVAL;
		goto stop;
	}
retry:
	ret = __get_victim(sbi, &segno, gc_type);
	if (ret) {
		/* allow searching for a victim in sections that have pinned data */
		if (ret == -ENODATA && gc_type == FG_GC &&
				f2fs_pinned_section_exists(DIRTY_I(sbi))) {
			f2fs_unpin_all_sections(sbi, false);
			goto retry;
		}
		goto stop;
	}

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
				gc_control->should_migrate_blocks);
	total_freed += seg_freed;

	if (seg_freed == f2fs_usable_segs_in_sec(sbi, segno))
		sec_freed++;

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (gc_control->init_gc_type == FG_GC ||
	    !has_not_enough_free_secs(sbi,
				(gc_type == FG_GC) ? sec_freed : 0, 0)) {
		if (gc_type == FG_GC && sec_freed < gc_control->nr_free_secs)
			goto go_gc_more;
		goto stop;
	}

	/* FG_GC stops GC by skip_count */
	if (gc_type == FG_GC) {
		if (sbi->skipped_gc_rwsem)
			skipped_round++;
		round++;
		if (skipped_round > MAX_SKIP_GC_COUNT &&
				skipped_round * 2 >= round) {
			ret = f2fs_write_checkpoint(sbi, &cpc);
			goto stop;
		}
	}

	__get_secs_required(sbi, NULL, &upper_secs, NULL);

	/*
	 * Write checkpoint to reclaim prefree segments.
	 * We need three extra sections for writer's data/node/dentry.
	 */
	if (free_sections(sbi) <= upper_secs + NR_GC_CHECKPOINT_SECS &&
				prefree_segments(sbi)) {
		ret = f2fs_write_checkpoint(sbi, &cpc);
		if (ret)
			goto stop;
	}
go_gc_more:
	segno = NULL_SEGNO;
	goto gc_more;

stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = gc_control->victim_segno;

	if (gc_type == FG_GC)
		f2fs_unpin_all_sections(sbi, true);

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	f2fs_up_write(&sbi->gc_lock);

	put_gc_inode(&gc_list);

	if (gc_control->err_gc_skipped && !ret)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}
int __init f2fs_create_garbage_collection_cache(void)
{
	victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
					sizeof(struct victim_entry));
	if (!victim_entry_slab)
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_garbage_collection_cache(void)
{
	kmem_cache_destroy(victim_entry_slab);
}
static void init_atgc_management(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;

	if (test_opt(sbi, ATGC) &&
		SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
		am->atgc_enabled = true;

	am->root = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&am->victim_list);
	am->victim_count = 0;

	am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
	am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
	am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
	am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
}
void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;

	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* give warm/cold data area from slower device */
	if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;

	init_atgc_management(sbi);
}
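
/*
 * Resize helper: migrate all valid blocks out of the last @secs sections
 * via FG_GC so the tail of the main area can be released. With @gc_only,
 * only the migration is attempted; otherwise the range must end up
 * completely free and a checkpoint is written.
 */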
static int free_segment_range(struct f2fs_sb_info *sbi,
				unsigned int secs, bool gc_only)
{
	unsigned int segno, next_inuse, start, end;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	int gc_mode, gc_type;
	int err = 0;
	int type;

	/* Force block allocation for GC */
	MAIN_SECS(sbi) -= secs;
	start = MAIN_SECS(sbi) * sbi->segs_per_sec;
	end = MAIN_SEGS(sbi) - 1;

	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
	for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
		if (SIT_I(sbi)->last_victim[gc_mode] >= start)
			SIT_I(sbi)->last_victim[gc_mode] = 0;

	for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
		if (sbi->next_victim_seg[gc_type] >= start)
			sbi->next_victim_seg[gc_type] = NULL_SEGNO;
	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);

	/* Move out cursegs from the target range */
	for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++)
		f2fs_allocate_segment_for_resize(sbi, type, start, end);

	/* do GC to move out valid blocks in the range */
	for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
		struct gc_inode_list gc_list = {
			.ilist = LIST_HEAD_INIT(gc_list.ilist),
			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
		};

		do_garbage_collect(sbi, segno, &gc_list, FG_GC, true);
		put_gc_inode(&gc_list);

		if (!gc_only && get_valid_blocks(sbi, segno, true)) {
			err = -EAGAIN;
			goto out;
		}
		if (fatal_signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}

	if (gc_only)
		goto out;

	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err)
		goto out;

	next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
	if (next_inuse <= end) {
		f2fs_err(sbi, "segno %u should be free but still inuse!",
			 next_inuse);
		f2fs_bug_on(sbi, 1);
	}
out:
	MAIN_SECS(sbi) += secs;
	return err;
}
static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
{
	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
	int section_count;
	int segment_count;
	int segment_count_main;
	long long block_count;
	int segs = secs * sbi->segs_per_sec;

	f2fs_down_write(&sbi->sb_lock);

	section_count = le32_to_cpu(raw_sb->section_count);
	segment_count = le32_to_cpu(raw_sb->segment_count);
	segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
	block_count = le64_to_cpu(raw_sb->block_count);

	raw_sb->section_count = cpu_to_le32(section_count + secs);
	raw_sb->segment_count = cpu_to_le32(segment_count + segs);
	raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
	raw_sb->block_count = cpu_to_le64(block_count +
			(long long)segs * sbi->blocks_per_seg);
	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		int dev_segs =
			le32_to_cpu(raw_sb->devs[last_dev].total_segments);

		raw_sb->devs[last_dev].total_segments =
						cpu_to_le32(dev_segs + segs);
	}

	f2fs_up_write(&sbi->sb_lock);
}
static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
{
	int segs = secs * sbi->segs_per_sec;
	long long blks = (long long)segs * sbi->blocks_per_seg;
	long long user_block_count =
				le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);

	SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
	MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
	MAIN_SECS(sbi) += secs;
	FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
	FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
	F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;

		FDEV(last_dev).total_segments =
				(int)FDEV(last_dev).total_segments + segs;
		FDEV(last_dev).end_blk =
				(long long)FDEV(last_dev).end_blk + blks;
#ifdef CONFIG_BLK_DEV_ZONED
		FDEV(last_dev).nr_blkz = (int)FDEV(last_dev).nr_blkz +
					(int)(blks >> sbi->log_blocks_per_blkz);
#endif
	}
}
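
/*
 * Shrink the filesystem to @block_count blocks: pre-free the tail
 * sections, freeze the filesystem, then rewrite the superblock,
 * checkpoint and in-memory metadata, rolling back on failure.
 */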
int f2fs_resize_fs(struct file *filp, __u64 block_count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	__u64 old_block_count, shrunk_blocks;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	unsigned int secs;
	__u32 rem;
	int err = 0;

	old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
	if (block_count > old_block_count)
		return -EINVAL;

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		__u64 last_segs = FDEV(last_dev).total_segments;

		if (block_count + last_segs * sbi->blocks_per_seg <=
								old_block_count)
			return -EINVAL;
	}

	/* new fs size should align to section size */
	div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
	if (rem)
		return -EINVAL;

	if (block_count == old_block_count)
		return 0;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
		f2fs_err(sbi, "Should run fsck to repair first.");
		return -EFSCORRUPTED;
	}

	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		f2fs_err(sbi, "Checkpoint should be enabled.");
		return -EINVAL;
	}

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	shrunk_blocks = old_block_count - block_count;
	secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));

	/* stop other GC */
	if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
		err = -EAGAIN;
		goto out_drop_write;
	}

	/* stop CP to protect MAIN_SEC in free_segment_range */
	f2fs_lock_op(sbi);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	spin_unlock(&sbi->stat_lock);

	if (err)
		goto out_unlock;

	err = free_segment_range(sbi, secs, true);

out_unlock:
	f2fs_unlock_op(sbi);
	f2fs_up_write(&sbi->gc_lock);
out_drop_write:
	mnt_drop_write_file(filp);
	if (err)
		return err;

	err = freeze_super(sbi->sb);
	if (err)
		return err;

	if (f2fs_readonly(sbi->sb)) {
		thaw_super(sbi->sb);
		return -EROFS;
	}

	f2fs_down_write(&sbi->gc_lock);
	f2fs_down_write(&sbi->cp_global_sem);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	else
		sbi->user_block_count -= shrunk_blocks;
	spin_unlock(&sbi->stat_lock);
	if (err)
		goto out_err;

	set_sbi_flag(sbi, SBI_IS_RESIZEFS);
	err = free_segment_range(sbi, secs, false);
	if (err)
		goto recover_out;

	update_sb_metadata(sbi, -secs);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		update_sb_metadata(sbi, secs);
		goto recover_out;
	}

	update_fs_metadata(sbi, -secs);
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err) {
		update_fs_metadata(sbi, secs);
		update_sb_metadata(sbi, secs);
		f2fs_commit_super(sbi, false);
	}
recover_out:
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");

		spin_lock(&sbi->stat_lock);
		sbi->user_block_count += shrunk_blocks;
		spin_unlock(&sbi->stat_lock);
	}
out_err:
	f2fs_up_write(&sbi->cp_global_sem);
	f2fs_up_write(&sbi->gc_lock);
	thaw_super(sbi->sb);
	return err;
}