/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = gc_th->min_sleep_time;

	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
						kthread_should_stop(),
						msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			continue;
		}

#ifdef CONFIG_F2FS_FAULT_INJECTION
		if (time_to_inject(sbi, FAULT_CHECKPOINT))
			f2fs_stop_checkpoint(sbi, false);
#endif

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. The IO subsystem is idle, judged by the number of
		 *    writeback pages and the number of requests in the
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GC too frequently,
		 * since segments may be invalidated soon afterwards by
		 * user updates or deletions. So we wait a while to let
		 * more dirty segments accumulate.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			increase_sleep_time(gc_th, &wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);

		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC)))
			wait_ms = gc_th->no_gc_sleep_time;

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balance f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);

	} while (!kthread_should_stop());
	return 0;
}
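
/*
 * Note: with the stock tuning in gc.h, min_sleep_time is 30 seconds,
 * max_sleep_time is 60 seconds, and no_gc_sleep_time is 5 minutes
 * (DEF_GC_THREAD_{MIN,MAX,NOGC}_SLEEP_TIME). The loop above shortens the
 * interval while invalid blocks are plentiful and stretches it toward
 * no_gc_sleep_time once victim selection starts coming up empty.
 */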
int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;
	gc_th->gc_idle = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}
void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}
static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	if (gc_th && gc_th->gc_idle) {
		if (gc_th->gc_idle == 1)
			gc_mode = GC_CB;
		else if (gc_th->gc_idle == 2)
			gc_mode = GC_GREEDY;
	}
	return gc_mode;
}
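
/*
 * By default, background GC uses the cost-benefit policy (GC_CB) and
 * foreground GC uses greedy selection (GC_GREEDY). The gc_idle knob,
 * exposed through sysfs, overrides this for the background thread:
 * 1 forces cost-benefit and 2 forces greedy.
 */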
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	/* we need to check every dirty segment in the FG_GC case */
	if (gc_type != FG_GC && p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	p->offset = sbi->last_victim[p->gc_mode];
}
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}
static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim sections that were
	 * already selected by a previous background GC pass, since those
	 * sections are guaranteed to contain few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;

		if (no_fggc_candidate(sbi, secno))
			continue;

		clear_bit(secno, dirty_i->victim_secmap);
		return secno * sbi->segs_per_sec;
	}
	return NULL_SEGNO;
}
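
/*
 * Cost-benefit victim scoring, following the classic LFS formula:
 * benefit = age * (100 - u) / (100 + u), where u is the section's
 * utilization in percent and age is the normalized time since its last
 * modification. get_cb_cost() returns UINT_MAX - benefit so that a
 * *lower* cost means a *better* victim.
 *
 * Worked example (illustrative numbers): a section that is 20% utilized
 * (u = 20) and maximally old (age = 100) scores 100 * 80 * 100 / 120 =
 * 6666, while an equally old section at 80% utilization scores only
 * 100 * 20 * 100 / 180 = 1111, so the emptier section is reclaimed first.
 */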
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SECNO(sbi, segno);
	unsigned int start = secno * sbi->segs_per_sec;
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the user has changed the system time */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}
static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
	else
		return get_cb_cost(sbi, segno);
}
static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}
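
/*
 * Victim search notes: the scan below walks the dirty-segment bitmap in
 * section-sized strides (p.ofs_unit) starting at the cursor saved in
 * sbi->last_victim[gc_mode]. When it runs off the end of the main area,
 * it wraps around once to cover the segments below the starting offset,
 * and it gives up after p.max_search candidates so a single invocation
 * stays bounded.
 */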
/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and does not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds the segment
 * with the minimum number of valid blocks and removes it from the
 * dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment = MAIN_SEGS(sbi);
	unsigned int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = get_max_cost(sbi, &p);

	if (p.max_search == 0)
		goto out;

	last_victim = sbi->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
		if (segno >= last_segment) {
			if (sbi->last_victim[p.gc_mode]) {
				last_segment = sbi->last_victim[p.gc_mode];
				sbi->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1) {
			p.offset -= segno % p.ofs_unit;
			nsearched += count_bits(p.dirty_segmap,
						p.offset - p.ofs_unit,
						p.ofs_unit);
		} else {
			nsearched++;
		}

		secno = GET_SECNO(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;
		if (gc_type == FG_GC && p.alloc_mode == LFS &&
					no_fggc_candidate(sbi, secno))
			goto next;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sbi->last_victim[p.gc_mode] && segno <= last_victim)
				sbi->last_victim[p.gc_mode] = last_victim + 1;
			else
				sbi->last_victim[p.gc_mode] = segno + 1;
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SECNO(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
out:
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}
static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};
static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}
static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(inode_entry_slab, ie);
	}
}
static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}
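
/*
 * Both gc_node_segment() and gc_data_segment() below make several passes
 * over the victim segment's summary entries instead of one: the early
 * phases only issue readahead for the NAT blocks and node pages the later
 * phases will need, so the final phase, which actually migrates blocks,
 * mostly hits the page cache instead of stalling on synchronous metadata
 * reads.
 */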
/*
 * This function compares the node address recorded in the summary with
 * the one in the NAT. If they match, the node is valid and is migrated
 * with cold status; otherwise the stale node block is ignored.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;

		/* stop BG_GC if there is not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		get_node_info(sbi, nid, &ni);
		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		move_node_page(node_page, gc_type);
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;
}
/*
 * Calculate the start block index for the given node offset.
 * Be careful: callers must pass a node offset that refers to a direct
 * node block only. Passing an offset of an indirect or double indirect
 * node block is a caller bug.
 */
block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
}
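
/*
 * Worked example (assuming 4 KiB blocks, where NIDS_PER_BLOCK and
 * ADDRS_PER_BLOCK are both 1018 and a plain inode carries 923 data
 * pointers): node offset 0 is the inode itself, offsets 1 and 2 are the
 * two direct nodes, and offset 3 is the first indirect node. For
 * node_ofs = 2, bidx = 1, so that node covers file blocks starting at
 * 1 * 1018 + 923 = 1941. For node_ofs = 4, the first child of the first
 * indirect node, dec = 0 and bidx = 2, giving a start index of
 * 2 * 1018 + 923 = 2959. The "dec" terms simply discount the indirect
 * node blocks interleaved in the node-offset numbering, which map no
 * data blocks themselves.
 */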
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: valid data with mismatched node version.",
				__func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return false;
	return true;
}
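
/*
 * Rationale for the special-cased move below: GC may run without the
 * file's encryption key, so an encrypted data block cannot go through the
 * normal write path, which would decrypt and re-encrypt it. Instead the
 * raw ciphertext is staged in the meta inode's address space at the new
 * block address and written out bit-for-bit.
 */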
static void move_encrypted_block(struct inode *inode, block_t bidx)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.op = REQ_OP_READ,
		.op_flags = READ_SYNC,
		.encrypted_page = NULL,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page;
	block_t newaddr;
	int err;

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		goto put_out;
	}

	/*
	 * don't cache encrypted data into the meta inode until previous
	 * dirty data has been written back, to avoid racing between GC
	 * and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true);

	get_node_info(fio.sbi, dn.nid, &ni);
	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
							&sum, CURSEG_COLD_DATA);

	fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi), newaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto recover_block;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_page_out;

	/* write page */
	lock_page(fio.encrypted_page);

	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
		err = -EIO;
		goto put_page_out;
	}
	if (unlikely(!PageUptodate(fio.encrypted_page))) {
		err = -EIO;
		goto put_page_out;
	}

	set_page_dirty(fio.encrypted_page);
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = WRITE_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_mbio(&fio);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		__f2fs_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
								true, true);
put_out:
	f2fs_put_page(dn.node_page, 1);
out:
	f2fs_put_page(page, 1);
}
static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
{
	struct page *page;

	page = get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return;

	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.type = DATA,
			.op = REQ_OP_WRITE,
			.op_flags = WRITE_SYNC,
			.page = page,
			.encrypted_page = NULL,
		};
		bool is_dirty = PageDirty(page);
		int err;

retry:
		set_page_dirty(page);
		f2fs_wait_on_page_writeback(page, DATA, true);
		if (clear_page_dirty_for_io(page))
			inode_dec_dirty_pages(inode);

		set_cold_data(page);

		err = do_write_data_page(&fio);
		if (err) {
			clear_cold_data(page);
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto retry;
			}
			if (is_dirty)
				set_page_dirty(page);
		}

		clear_cold_data(page);
	}
out:
	f2fs_put_page(page, 1);
}
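
/*
 * Note the asymmetry above: background GC only redirties the page with a
 * cold hint and lets ordinary writeback migrate it later, while
 * foreground GC, which must free the section now, writes the block out
 * synchronously through do_write_data_page().
 */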
/*
 * This function tries to get the parent node page of a victim data block
 * and checks the block's validity. If the block is valid, it is copied
 * with cold status and the parent node is updated.
 * If the parent node is not valid or the data block address does not
 * match, the victim data block is ignored.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/* stop BG_GC if there is not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			/* if encrypted inode, defer the move to the last phase */
			if (f2fs_encrypted_inode(inode) &&
						S_ISREG(inode->i_mode)) {
				add_gc_inode(gc_list, inode);
				continue;
			}

			start_bidx = start_bidx_of_node(nofs, inode);
			data_page = get_read_data_page(inode,
					start_bidx + ofs_in_node, REQ_RAHEAD,
					true);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->dio_rwsem[READ]))
					continue;
				if (!down_write_trylock(
						&fi->dio_rwsem[WRITE])) {
					up_write(&fi->dio_rwsem[READ]);
					continue;
				}
				locked = true;
			}

			start_bidx = start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
				move_encrypted_block(inode, start_bidx);
			else
				move_data_page(inode, start_bidx, gc_type);

			if (locked) {
				up_write(&fi->dio_rwsem[WRITE]);
				up_write(&fi->dio_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;
}
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
						NO_CHECK_TYPE, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}
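
/*
 * do_garbage_collect() works on a whole section at a time: it reads ahead
 * the section's SSA blocks in one batch, pins every summary page, and
 * then cleans the section segment by segment under a single block plug so
 * the generated IO can be merged.
 */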
static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int sec_freed = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;

	/* read ahead multiple SSA blocks with contiguous addresses */
	if (sbi->segs_per_sec > 1)
		ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					sbi->segs_per_sec, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = get_sum_page(sbi, segno++);
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, 1) == 0 ||
				!PageUptodate(sum_page) ||
				unlikely(f2fs_cp_error(sbi)))
			goto next;

		sum = page_address(sum_page);
		if (type != GET_SUM_TYPE((&sum->footer))) {
			f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent segment (%u) "
				"type [%d, %d] in SSA and SIT",
				segno, type, GET_SUM_TYPE((&sum->footer)));
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			goto next;
		}

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - mutex_lock(sentry_lock)
		 *   - mutex_lock(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			gc_node_segment(sbi, sum->entries, segno, gc_type);
		else
			gc_data_segment(sbi, sum->entries, gc_list, segno,
								gc_type);

		stat_inc_seg_count(sbi, type, gc_type);
next:
		f2fs_put_page(sum_page, 0);
	}

	if (gc_type == FG_GC)
		f2fs_submit_merged_bio(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA, WRITE);

	blk_finish_plug(&plug);

	if (gc_type == FG_GC &&
		get_valid_blocks(sbi, start_segno, sbi->segs_per_sec) == 0)
		sec_freed = 1;

	stat_inc_call_count(sbi->stat_info);

	return sec_freed;
}
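
/*
 * f2fs_gc() is entered with sbi->gc_mutex held and releases it before
 * returning. In sync mode it runs foreground GC and reports -EAGAIN if no
 * section could be freed; in background mode it loops back for another
 * round whenever the count of free sections is still too low.
 */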
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync)
{
	unsigned int segno;
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0;
	int ret = -EINVAL;
	struct cp_control cpc;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(GFP_NOFS),
	};

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	segno = NULL_SEGNO;

	if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
		goto stop;
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed, 0)) {
		gc_type = FG_GC;
		/*
		 * If there is no victim and no prefree segment but still not
		 * enough free sections, we should flush dent/node blocks and
		 * do garbage collections.
		 */
		if (__get_victim(sbi, &segno, gc_type) ||
						prefree_segments(sbi)) {
			ret = write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
			segno = NULL_SEGNO;
		} else if (has_not_enough_free_secs(sbi, 0, 0)) {
			ret = write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
	}

	if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
		goto stop;
	ret = 0;

	if (do_garbage_collect(sbi, segno, &gc_list, gc_type) &&
			gc_type == FG_GC)
		sec_freed++;

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (!sync) {
		if (has_not_enough_free_secs(sbi, sec_freed, 0))
			goto gc_more;

		if (gc_type == FG_GC)
			ret = write_checkpoint(sbi, &cpc);
	}
stop:
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&gc_list);

	if (sync)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}
void build_gc_manager(struct f2fs_sb_info *sbi)
{
	u64 main_count, resv_count, ovp_count, blocks_per_sec;

	DIRTY_I(sbi)->v_ops = &default_v_ops;

	/* threshold of # of valid blocks in a section for victims of FG_GC */
	main_count = SM_I(sbi)->main_segments << sbi->log_blocks_per_seg;
	resv_count = SM_I(sbi)->reserved_segments << sbi->log_blocks_per_seg;
	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
	blocks_per_sec = sbi->blocks_per_seg * sbi->segs_per_sec;

	sbi->fggc_threshold = div_u64((main_count - ovp_count) * blocks_per_sec,
					(main_count - resv_count));
}
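
/*
 * Illustrative sizing (hypothetical numbers): on a volume with 10000 main
 * segments of 512 blocks, 200 overprovision segments and 100 reserved
 * segments, with 1 segment per section, the threshold comes out to
 * (5120000 - 102400) * 512 / (5120000 - 51200) ~= 506 blocks. A section
 * holding more valid blocks than this is skipped by no_fggc_candidate(),
 * since foreground GC would spend more IO moving its data than the
 * reclaimed space is worth.
 */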