GNU Linux-libre 5.10.217-gnu1 - fs/f2fs/gc.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * fs/f2fs/gc.c
4  *
5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6  *             http://www.samsung.com/
7  */
8 #include <linux/fs.h>
9 #include <linux/module.h>
10 #include <linux/mount.h>
11 #include <linux/backing-dev.h>
12 #include <linux/init.h>
13 #include <linux/f2fs_fs.h>
14 #include <linux/kthread.h>
15 #include <linux/delay.h>
16 #include <linux/freezer.h>
17 #include <linux/sched/signal.h>
18
19 #include "f2fs.h"
20 #include "node.h"
21 #include "segment.h"
22 #include "gc.h"
23 #include <trace/events/f2fs.h>
24
25 static struct kmem_cache *victim_entry_slab;
26
27 static unsigned int count_bits(const unsigned long *addr,
28                                 unsigned int offset, unsigned int len);
29
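/*
 * Background GC thread: sleeps for an adaptive interval, then tries to grab
 * sbi->gc_lock and run one round of background GC when the filesystem looks
 * idle, increasing or decreasing the next sleep time based on how many
 * invalid blocks have accumulated.
 */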
30 static int gc_thread_func(void *data)
31 {
32         struct f2fs_sb_info *sbi = data;
33         struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
34         wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
35         unsigned int wait_ms;
36
37         wait_ms = gc_th->min_sleep_time;
38
39         set_freezable();
40         do {
41                 bool sync_mode;
42
43                 wait_event_interruptible_timeout(*wq,
44                                 kthread_should_stop() || freezing(current) ||
45                                 gc_th->gc_wake,
46                                 msecs_to_jiffies(wait_ms));
47
48                 /* the thread was woken up explicitly; give GC a try this one time */
49                 if (gc_th->gc_wake)
50                         gc_th->gc_wake = 0;
51
52                 if (try_to_freeze()) {
53                         stat_other_skip_bggc_count(sbi);
54                         continue;
55                 }
56                 if (kthread_should_stop())
57                         break;
58
59                 if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
60                         increase_sleep_time(gc_th, &wait_ms);
61                         stat_other_skip_bggc_count(sbi);
62                         continue;
63                 }
64
65                 if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
66                         f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
67                         f2fs_stop_checkpoint(sbi, false);
68                 }
69
70                 if (!sb_start_write_trylock(sbi->sb)) {
71                         stat_other_skip_bggc_count(sbi);
72                         continue;
73                 }
74
75                 /*
76                  * [GC triggering condition]
77                  * 0. GC is not conducted currently.
78                  * 1. There are enough dirty segments.
79                  * 2. IO subsystem is idle by checking the # of writeback pages.
80                  * 3. IO subsystem is idle by checking the # of requests in
81                  *    bdev's request list.
82                  *
83                  * Note: we have to avoid triggering GCs too frequently,
84                  * because some segments may be invalidated soon afterwards
85                  * by user updates or deletions. So we wait for a while to
86                  * let dirty segments accumulate.
87                  */
88                 if (sbi->gc_mode == GC_URGENT_HIGH) {
89                         wait_ms = gc_th->urgent_sleep_time;
90                         down_write(&sbi->gc_lock);
91                         goto do_gc;
92                 }
93
94                 if (!down_write_trylock(&sbi->gc_lock)) {
95                         stat_other_skip_bggc_count(sbi);
96                         goto next;
97                 }
98
99                 if (!is_idle(sbi, GC_TIME)) {
100                         increase_sleep_time(gc_th, &wait_ms);
101                         up_write(&sbi->gc_lock);
102                         stat_io_skip_bggc_count(sbi);
103                         goto next;
104                 }
105
106                 if (has_enough_invalid_blocks(sbi))
107                         decrease_sleep_time(gc_th, &wait_ms);
108                 else
109                         increase_sleep_time(gc_th, &wait_ms);
110 do_gc:
111                 stat_inc_bggc_count(sbi->stat_info);
112
113                 sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;
114
115                 /* if return value is not zero, no victim was selected */
116                 if (f2fs_gc(sbi, sync_mode, true, false, NULL_SEGNO))
117                         wait_ms = gc_th->no_gc_sleep_time;
118
119                 trace_f2fs_background_gc(sbi->sb, wait_ms,
120                                 prefree_segments(sbi), free_segments(sbi));
121
122                 /* balancing f2fs's metadata periodically */
123                 f2fs_balance_fs_bg(sbi, true);
124 next:
125                 sb_end_write(sbi->sb);
126
127         } while (!kthread_should_stop());
128         return 0;
129 }
130
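/*
 * Allocate the GC kthread context and start the "f2fs_gc-major:minor"
 * background thread; returns -ENOMEM on allocation failure or the
 * kthread_run() error code.
 */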
131 int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
132 {
133         struct f2fs_gc_kthread *gc_th;
134         dev_t dev = sbi->sb->s_bdev->bd_dev;
135         int err = 0;
136
137         gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
138         if (!gc_th) {
139                 err = -ENOMEM;
140                 goto out;
141         }
142
143         gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
144         gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
145         gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
146         gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;
147
148         gc_th->gc_wake = 0;
149
150         sbi->gc_thread = gc_th;
151         init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
152         sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
153                         "f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
154         if (IS_ERR(gc_th->f2fs_gc_task)) {
155                 err = PTR_ERR(gc_th->f2fs_gc_task);
156                 kfree(gc_th);
157                 sbi->gc_thread = NULL;
158         }
159 out:
160         return err;
161 }
162
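/* Stop the background GC thread and free its context, if it was started. */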
163 void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
164 {
165         struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
166         if (!gc_th)
167                 return;
168         kthread_stop(gc_th->f2fs_gc_task);
169         kfree(gc_th);
170         sbi->gc_thread = NULL;
171 }
172
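/*
 * Pick the victim selection algorithm: cost-benefit (or age-threshold when
 * ATGC is enabled) for background GC and greedy for foreground GC, unless
 * the user-set gc_mode (GC_IDLE_* / GC_URGENT_HIGH) overrides it.
 */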
173 static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
174 {
175         int gc_mode;
176
177         if (gc_type == BG_GC) {
178                 if (sbi->am.atgc_enabled)
179                         gc_mode = GC_AT;
180                 else
181                         gc_mode = GC_CB;
182         } else {
183                 gc_mode = GC_GREEDY;
184         }
185
186         switch (sbi->gc_mode) {
187         case GC_IDLE_CB:
188                 gc_mode = GC_CB;
189                 break;
190         case GC_IDLE_GREEDY:
191         case GC_URGENT_HIGH:
192                 gc_mode = GC_GREEDY;
193                 break;
194         case GC_IDLE_AT:
195                 gc_mode = GC_AT;
196                 break;
197         }
198
199         return gc_mode;
200 }
201
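/*
 * Fill in the victim selection policy: which dirty bitmap to scan, how many
 * candidates to examine (max_search), the GC cost mode, and the search unit
 * (one segment for SSR/AT_SSR, one section for LFS allocation).
 */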
202 static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
203                         int type, struct victim_sel_policy *p)
204 {
205         struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
206
207         if (p->alloc_mode == SSR) {
208                 p->gc_mode = GC_GREEDY;
209                 p->dirty_bitmap = dirty_i->dirty_segmap[type];
210                 p->max_search = dirty_i->nr_dirty[type];
211                 p->ofs_unit = 1;
212         } else if (p->alloc_mode == AT_SSR) {
213                 p->gc_mode = GC_GREEDY;
214                 p->dirty_bitmap = dirty_i->dirty_segmap[type];
215                 p->max_search = dirty_i->nr_dirty[type];
216                 p->ofs_unit = 1;
217         } else {
218                 p->gc_mode = select_gc_type(sbi, gc_type);
219                 p->ofs_unit = sbi->segs_per_sec;
220                 if (__is_large_section(sbi)) {
221                         p->dirty_bitmap = dirty_i->dirty_secmap;
222                         p->max_search = count_bits(p->dirty_bitmap,
223                                                 0, MAIN_SECS(sbi));
224                 } else {
225                         p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
226                         p->max_search = dirty_i->nr_dirty[DIRTY];
227                 }
228         }
229
230         /*
231          * adjust the candidate range; all dirty segments should be selected
232          * in the foreground GC and urgent GC cases.
233          */
234         if (gc_type != FG_GC &&
235                         (sbi->gc_mode != GC_URGENT_HIGH) &&
236                         (p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) &&
237                         p->max_search > sbi->max_victim_search)
238                 p->max_search = sbi->max_victim_search;
239
240         /* let's select the beginning hot/small space first in no_heap mode */
241         if (test_opt(sbi, NOHEAP) &&
242                 (type == CURSEG_HOT_DATA || IS_NODESEG(type)))
243                 p->offset = 0;
244         else
245                 p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
246 }
247
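/* Worst-case victim cost, used to initialize p->min_cost before scanning. */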
248 static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
249                                 struct victim_sel_policy *p)
250 {
251         /* SSR allocates in a segment unit */
252         if (p->alloc_mode == SSR)
253                 return sbi->blocks_per_seg;
254         else if (p->alloc_mode == AT_SSR)
255                 return UINT_MAX;
256
257         /* LFS */
258         if (p->gc_mode == GC_GREEDY)
259                 return 2 * sbi->blocks_per_seg * p->ofs_unit;
260         else if (p->gc_mode == GC_CB)
261                 return UINT_MAX;
262         else if (p->gc_mode == GC_AT)
263                 return UINT_MAX;
264         else /* No other gc_mode */
265                 return 0;
266 }
267
268 static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
269 {
270         struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
271         unsigned int secno;
272
273         /*
274          * If the gc_type is FG_GC, we can pick victim sections that were
275          * already selected by background GC, since those sections are
276          * guaranteed to have few valid blocks.
277          */
278         for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
279                 if (sec_usage_check(sbi, secno))
280                         continue;
281                 clear_bit(secno, dirty_i->victim_secmap);
282                 return GET_SEG_FROM_SEC(sbi, secno);
283         }
284         return NULL_SEGNO;
285 }
286
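/*
 * Cost-benefit cost of a section: lower return values indicate a better
 * victim, derived from the section's utilization and an age computed from
 * its average mtime.
 */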
287 static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
288 {
289         struct sit_info *sit_i = SIT_I(sbi);
290         unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
291         unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
292         unsigned long long mtime = 0;
293         unsigned int vblocks;
294         unsigned char age = 0;
295         unsigned char u;
296         unsigned int i;
297         unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno);
298
299         for (i = 0; i < usable_segs_per_sec; i++)
300                 mtime += get_seg_entry(sbi, start + i)->mtime;
301         vblocks = get_valid_blocks(sbi, segno, true);
302
303         mtime = div_u64(mtime, usable_segs_per_sec);
304         vblocks = div_u64(vblocks, usable_segs_per_sec);
305
306         u = (vblocks * 100) >> sbi->log_blocks_per_seg;
307
308         /* Handle the case where the system time was changed by the user */
309         if (mtime < sit_i->min_mtime)
310                 sit_i->min_mtime = mtime;
311         if (mtime > sit_i->max_mtime)
312                 sit_i->max_mtime = mtime;
313         if (sit_i->max_mtime != sit_i->min_mtime)
314                 age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
315                                 sit_i->max_mtime - sit_i->min_mtime);
316
317         return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
318 }
319
320 static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
321                         unsigned int segno, struct victim_sel_policy *p)
322 {
323         if (p->alloc_mode == SSR)
324                 return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
325
326         /* alloc_mode == LFS */
327         if (p->gc_mode == GC_GREEDY)
328                 return get_valid_blocks(sbi, segno, true);
329         else if (p->gc_mode == GC_CB)
330                 return get_cb_cost(sbi, segno);
331
332         f2fs_bug_on(sbi, 1);
333         return 0;
334 }
335
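/* Count the bits set in addr within [offset, offset + len). */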
336 static unsigned int count_bits(const unsigned long *addr,
337                                 unsigned int offset, unsigned int len)
338 {
339         unsigned int end = offset + len, sum = 0;
340
341         while (offset < end) {
342                 if (test_bit(offset++, addr))
343                         ++sum;
344         }
345         return sum;
346 }
347
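/*
 * Allocate a victim_entry for ATGC and link it into the mtime-ordered
 * rb-tree and the victim list of the atgc_management structure.
 */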
348 static struct victim_entry *attach_victim_entry(struct f2fs_sb_info *sbi,
349                                 unsigned long long mtime, unsigned int segno,
350                                 struct rb_node *parent, struct rb_node **p,
351                                 bool left_most)
352 {
353         struct atgc_management *am = &sbi->am;
354         struct victim_entry *ve;
355
356         ve = f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS);
357
358         ve->mtime = mtime;
359         ve->segno = segno;
360
361         rb_link_node(&ve->rb_node, parent, p);
362         rb_insert_color_cached(&ve->rb_node, &am->root, left_most);
363
364         list_add_tail(&ve->list, &am->victim_list);
365
366         am->victim_count++;
367
368         return ve;
369 }
370
371 static void insert_victim_entry(struct f2fs_sb_info *sbi,
372                                 unsigned long long mtime, unsigned int segno)
373 {
374         struct atgc_management *am = &sbi->am;
375         struct rb_node **p;
376         struct rb_node *parent = NULL;
377         bool left_most = true;
378
379         p = f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, mtime, &left_most);
380         attach_victim_entry(sbi, mtime, segno, parent, p, left_most);
381 }
382
383 static void add_victim_entry(struct f2fs_sb_info *sbi,
384                                 struct victim_sel_policy *p, unsigned int segno)
385 {
386         struct sit_info *sit_i = SIT_I(sbi);
387         unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
388         unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
389         unsigned long long mtime = 0;
390         unsigned int i;
391
392         if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
393                 if (p->gc_mode == GC_AT &&
394                         get_valid_blocks(sbi, segno, true) == 0)
395                         return;
396         }
397
398         for (i = 0; i < sbi->segs_per_sec; i++)
399                 mtime += get_seg_entry(sbi, start + i)->mtime;
400         mtime = div_u64(mtime, sbi->segs_per_sec);
401
402         /* Handle the case where the system time was changed by the user */
403         if (mtime < sit_i->min_mtime)
404                 sit_i->min_mtime = mtime;
405         if (mtime > sit_i->max_mtime)
406                 sit_i->max_mtime = mtime;
407         if (mtime < sit_i->dirty_min_mtime)
408                 sit_i->dirty_min_mtime = mtime;
409         if (mtime > sit_i->dirty_max_mtime)
410                 sit_i->dirty_max_mtime = mtime;
411
412         /* don't choose young section as candidate */
413         if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
414                 return;
415
416         insert_victim_entry(sbi, mtime, segno);
417 }
418
419 static struct rb_node *lookup_central_victim(struct f2fs_sb_info *sbi,
420                                                 struct victim_sel_policy *p)
421 {
422         struct atgc_management *am = &sbi->am;
423         struct rb_node *parent = NULL;
424         bool left_most;
425
426         f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, p->age, &left_most);
427
428         return parent;
429 }
430
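/*
 * Age-threshold GC victim lookup: walk candidates from the oldest mtime and
 * pick the entry with the smallest cost, where the cost combines section age
 * and the invalid block ratio weighted by am->age_weight, examining at most
 * dirty_threshold candidates.
 */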
431 static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
432                                                 struct victim_sel_policy *p)
433 {
434         struct sit_info *sit_i = SIT_I(sbi);
435         struct atgc_management *am = &sbi->am;
436         struct rb_root_cached *root = &am->root;
437         struct rb_node *node;
438         struct rb_entry *re;
439         struct victim_entry *ve;
440         unsigned long long total_time;
441         unsigned long long age, u, accu;
442         unsigned long long max_mtime = sit_i->dirty_max_mtime;
443         unsigned long long min_mtime = sit_i->dirty_min_mtime;
444         unsigned int sec_blocks = BLKS_PER_SEC(sbi);
445         unsigned int vblocks;
446         unsigned int dirty_threshold = max(am->max_candidate_count,
447                                         am->candidate_ratio *
448                                         am->victim_count / 100);
449         unsigned int age_weight = am->age_weight;
450         unsigned int cost;
451         unsigned int iter = 0;
452
453         if (max_mtime < min_mtime)
454                 return;
455
456         max_mtime += 1;
457         total_time = max_mtime - min_mtime;
458
459         accu = div64_u64(ULLONG_MAX, total_time);
460         accu = min_t(unsigned long long, div_u64(accu, 100),
461                                         DEFAULT_ACCURACY_CLASS);
462
463         node = rb_first_cached(root);
464 next:
465         re = rb_entry_safe(node, struct rb_entry, rb_node);
466         if (!re)
467                 return;
468
469         ve = (struct victim_entry *)re;
470
471         if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
472                 goto skip;
473
474         /* age = 10000 * x% * 60 */
475         age = div64_u64(accu * (max_mtime - ve->mtime), total_time) *
476                                                                 age_weight;
477
478         vblocks = get_valid_blocks(sbi, ve->segno, true);
479         f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);
480
481         /* u = 10000 * x% * 40 */
482         u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) *
483                                                         (100 - age_weight);
484
485         f2fs_bug_on(sbi, age + u >= UINT_MAX);
486
487         cost = UINT_MAX - (age + u);
488         iter++;
489
490         if (cost < p->min_cost ||
491                         (cost == p->min_cost && age > p->oldest_age)) {
492                 p->min_cost = cost;
493                 p->oldest_age = age;
494                 p->min_segno = ve->segno;
495         }
496 skip:
497         if (iter < dirty_threshold) {
498                 node = rb_next(node);
499                 goto next;
500         }
501 }
502
503 /*
504  * select candidates around source section in range of
505  * [target - dirty_threshold, target + dirty_threshold]
506  */
507 static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
508                                                 struct victim_sel_policy *p)
509 {
510         struct sit_info *sit_i = SIT_I(sbi);
511         struct atgc_management *am = &sbi->am;
512         struct rb_node *node;
513         struct rb_entry *re;
514         struct victim_entry *ve;
515         unsigned long long age;
516         unsigned long long max_mtime = sit_i->dirty_max_mtime;
517         unsigned long long min_mtime = sit_i->dirty_min_mtime;
518         unsigned int seg_blocks = sbi->blocks_per_seg;
519         unsigned int vblocks;
520         unsigned int dirty_threshold = max(am->max_candidate_count,
521                                         am->candidate_ratio *
522                                         am->victim_count / 100);
523         unsigned int cost;
524         unsigned int iter = 0;
525         int stage = 0;
526
527         if (max_mtime < min_mtime)
528                 return;
529         max_mtime += 1;
530 next_stage:
531         node = lookup_central_victim(sbi, p);
532 next_node:
533         re = rb_entry_safe(node, struct rb_entry, rb_node);
534         if (!re) {
535                 if (stage == 0)
536                         goto skip_stage;
537                 return;
538         }
539
540         ve = (struct victim_entry *)re;
541
542         if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
543                 goto skip_node;
544
545         age = max_mtime - ve->mtime;
546
547         vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
548         f2fs_bug_on(sbi, !vblocks);
549
550         /* rare case */
551         if (vblocks == seg_blocks)
552                 goto skip_node;
553
554         iter++;
555
556         age = max_mtime - abs(p->age - age);
557         cost = UINT_MAX - vblocks;
558
559         if (cost < p->min_cost ||
560                         (cost == p->min_cost && age > p->oldest_age)) {
561                 p->min_cost = cost;
562                 p->oldest_age = age;
563                 p->min_segno = ve->segno;
564         }
565 skip_node:
566         if (iter < dirty_threshold) {
567                 if (stage == 0)
568                         node = rb_prev(node);
569                 else if (stage == 1)
570                         node = rb_next(node);
571                 goto next_node;
572         }
573 skip_stage:
574         if (stage < 1) {
575                 stage++;
576                 iter = 0;
577                 goto next_stage;
578         }
579 }
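
/* Dispatch to the ATGC or AT_SSR victim lookup based on the policy. */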
580 static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
581                                                 struct victim_sel_policy *p)
582 {
583         f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
584                                                 &sbi->am.root, true));
585
586         if (p->gc_mode == GC_AT)
587                 atgc_lookup_victim(sbi, p);
588         else if (p->alloc_mode == AT_SSR)
589                 atssr_lookup_victim(sbi, p);
590         else
591                 f2fs_bug_on(sbi, 1);
592 }
593
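/* Free all ATGC victim entries and reset the candidate rb-tree. */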
594 static void release_victim_entry(struct f2fs_sb_info *sbi)
595 {
596         struct atgc_management *am = &sbi->am;
597         struct victim_entry *ve, *tmp;
598
599         list_for_each_entry_safe(ve, tmp, &am->victim_list, list) {
600                 list_del(&ve->list);
601                 kmem_cache_free(victim_entry_slab, ve);
602                 am->victim_count--;
603         }
604
605         am->root = RB_ROOT_CACHED;
606
607         f2fs_bug_on(sbi, am->victim_count);
608         f2fs_bug_on(sbi, !list_empty(&am->victim_list));
609 }
610
611 /*
612  * This function is called from two paths.
613  * One is garbage collection and the other is SSR segment selection.
614  * When it is called during GC, it just gets a victim segment
615  * and does not remove it from the dirty seglist.
616  * When it is called from SSR segment selection, it finds the segment
617  * with the fewest valid blocks and removes it from the dirty seglist.
618  */
619 static int get_victim_by_default(struct f2fs_sb_info *sbi,
620                         unsigned int *result, int gc_type, int type,
621                         char alloc_mode, unsigned long long age)
622 {
623         struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
624         struct sit_info *sm = SIT_I(sbi);
625         struct victim_sel_policy p;
626         unsigned int secno, last_victim;
627         unsigned int last_segment;
628         unsigned int nsearched;
629         bool is_atgc;
630         int ret = 0;
631
632         mutex_lock(&dirty_i->seglist_lock);
633         last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec;
634
635         p.alloc_mode = alloc_mode;
636         p.age = age;
637         p.age_threshold = sbi->am.age_threshold;
638
639 retry:
640         select_policy(sbi, gc_type, type, &p);
641         p.min_segno = NULL_SEGNO;
642         p.oldest_age = 0;
643         p.min_cost = get_max_cost(sbi, &p);
644
645         is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR);
646         nsearched = 0;
647
648         if (is_atgc)
649                 SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;
650
651         if (*result != NULL_SEGNO) {
652                 if (!get_valid_blocks(sbi, *result, false)) {
653                         ret = -ENODATA;
654                         goto out;
655                 }
656
657                 if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
658                         ret = -EBUSY;
659                 else
660                         p.min_segno = *result;
661                 goto out;
662         }
663
664         ret = -ENODATA;
665         if (p.max_search == 0)
666                 goto out;
667
668         if (__is_large_section(sbi) && p.alloc_mode == LFS) {
669                 if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
670                         p.min_segno = sbi->next_victim_seg[BG_GC];
671                         *result = p.min_segno;
672                         sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
673                         goto got_result;
674                 }
675                 if (gc_type == FG_GC &&
676                                 sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
677                         p.min_segno = sbi->next_victim_seg[FG_GC];
678                         *result = p.min_segno;
679                         sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
680                         goto got_result;
681                 }
682         }
683
684         last_victim = sm->last_victim[p.gc_mode];
685         if (p.alloc_mode == LFS && gc_type == FG_GC) {
686                 p.min_segno = check_bg_victims(sbi);
687                 if (p.min_segno != NULL_SEGNO)
688                         goto got_it;
689         }
690
691         while (1) {
692                 unsigned long cost, *dirty_bitmap;
693                 unsigned int unit_no, segno;
694
695                 dirty_bitmap = p.dirty_bitmap;
696                 unit_no = find_next_bit(dirty_bitmap,
697                                 last_segment / p.ofs_unit,
698                                 p.offset / p.ofs_unit);
699                 segno = unit_no * p.ofs_unit;
700                 if (segno >= last_segment) {
701                         if (sm->last_victim[p.gc_mode]) {
702                                 last_segment =
703                                         sm->last_victim[p.gc_mode];
704                                 sm->last_victim[p.gc_mode] = 0;
705                                 p.offset = 0;
706                                 continue;
707                         }
708                         break;
709                 }
710
711                 p.offset = segno + p.ofs_unit;
712                 nsearched++;
713
714 #ifdef CONFIG_F2FS_CHECK_FS
715                 /*
716                  * skip selecting an invalid segno (one that previously failed the
717                  * block validity check during GC) to avoid an endless GC loop in
718                  * such cases.
719                  */
720                 if (test_bit(segno, sm->invalid_segmap))
721                         goto next;
722 #endif
723
724                 secno = GET_SEC_FROM_SEG(sbi, segno);
725
726                 if (sec_usage_check(sbi, secno))
727                         goto next;
728
729                 /* Don't touch checkpointed data */
730                 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
731                         if (p.alloc_mode == LFS) {
732                                 /*
733                                  * LFS is set to find source section during GC.
734                                  * The victim should have no checkpointed data.
735                                  */
736                                 if (get_ckpt_valid_blocks(sbi, segno, true))
737                                         goto next;
738                         } else {
739                                 /*
740                                  * SSR | AT_SSR are set to find target segment
741                                  * for writes which can be full by checkpointed
742                                  * and newly written blocks.
743                                  */
744                                 if (!f2fs_segment_has_free_slot(sbi, segno))
745                                         goto next;
746                         }
747                 }
748
749                 if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
750                         goto next;
751
752                 if (is_atgc) {
753                         add_victim_entry(sbi, &p, segno);
754                         goto next;
755                 }
756
757                 cost = get_gc_cost(sbi, segno, &p);
758
759                 if (p.min_cost > cost) {
760                         p.min_segno = segno;
761                         p.min_cost = cost;
762                 }
763 next:
764                 if (nsearched >= p.max_search) {
765                         if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
766                                 sm->last_victim[p.gc_mode] =
767                                         last_victim + p.ofs_unit;
768                         else
769                                 sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
770                         sm->last_victim[p.gc_mode] %=
771                                 (MAIN_SECS(sbi) * sbi->segs_per_sec);
772                         break;
773                 }
774         }
775
776         /* get victim for GC_AT/AT_SSR */
777         if (is_atgc) {
778                 lookup_victim_by_age(sbi, &p);
779                 release_victim_entry(sbi);
780         }
781
782         if (is_atgc && p.min_segno == NULL_SEGNO &&
783                         sm->elapsed_time < p.age_threshold) {
784                 p.age_threshold = 0;
785                 goto retry;
786         }
787
788         if (p.min_segno != NULL_SEGNO) {
789 got_it:
790                 *result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
791 got_result:
792                 if (p.alloc_mode == LFS) {
793                         secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
794                         if (gc_type == FG_GC)
795                                 sbi->cur_victim_sec = secno;
796                         else
797                                 set_bit(secno, dirty_i->victim_secmap);
798                 }
799                 ret = 0;
800
801         }
802 out:
803         if (p.min_segno != NULL_SEGNO)
804                 trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
805                                 sbi->cur_victim_sec,
806                                 prefree_segments(sbi), free_segments(sbi));
807         mutex_unlock(&dirty_i->seglist_lock);
808
809         return ret;
810 }
811
812 static const struct victim_selection default_v_ops = {
813         .get_victim = get_victim_by_default,
814 };
815
816 static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
817 {
818         struct inode_entry *ie;
819
820         ie = radix_tree_lookup(&gc_list->iroot, ino);
821         if (ie)
822                 return ie->inode;
823         return NULL;
824 }
825
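/*
 * Keep a reference to the inode in the gc_list so phase 4 can migrate its
 * blocks; if the inode is already tracked, drop the extra reference.
 */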
826 static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
827 {
828         struct inode_entry *new_ie;
829
830         if (inode == find_gc_inode(gc_list, inode->i_ino)) {
831                 iput(inode);
832                 return;
833         }
834         new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab, GFP_NOFS);
835         new_ie->inode = inode;
836
837         f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
838         list_add_tail(&new_ie->list, &gc_list->ilist);
839 }
840
841 static void put_gc_inode(struct gc_inode_list *gc_list)
842 {
843         struct inode_entry *ie, *next_ie;
844         list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
845                 radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
846                 iput(ie->inode);
847                 list_del(&ie->list);
848                 kmem_cache_free(f2fs_inode_entry_slab, ie);
849         }
850 }
851
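/* Check whether the block at (segno, offset) is still valid in the SIT bitmap. */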
852 static int check_valid_map(struct f2fs_sb_info *sbi,
853                                 unsigned int segno, int offset)
854 {
855         struct sit_info *sit_i = SIT_I(sbi);
856         struct seg_entry *sentry;
857         int ret;
858
859         down_read(&sit_i->sentry_lock);
860         sentry = get_seg_entry(sbi, segno);
861         ret = f2fs_test_bit(offset, sentry->cur_valid_map);
862         up_read(&sit_i->sentry_lock);
863         return ret;
864 }
865
866 /*
867  * This function compares the node address recorded in the summary with the
868  * address stored in the NAT. If they match (the node is valid), the node is
869  * migrated with cold status; otherwise the invalid node is ignored.
870  */
871 static int gc_node_segment(struct f2fs_sb_info *sbi,
872                 struct f2fs_summary *sum, unsigned int segno, int gc_type)
873 {
874         struct f2fs_summary *entry;
875         block_t start_addr;
876         int off;
877         int phase = 0;
878         bool fggc = (gc_type == FG_GC);
879         int submitted = 0;
880         unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
881
882         start_addr = START_BLOCK(sbi, segno);
883
884 next_step:
885         entry = sum;
886
887         if (fggc && phase == 2)
888                 atomic_inc(&sbi->wb_sync_req[NODE]);
889
890         for (off = 0; off < usable_blks_in_seg; off++, entry++) {
891                 nid_t nid = le32_to_cpu(entry->nid);
892                 struct page *node_page;
893                 struct node_info ni;
894                 int err;
895
896                 /* stop BG_GC if there are not enough free sections. */
897                 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
898                         return submitted;
899
900                 if (check_valid_map(sbi, segno, off) == 0)
901                         continue;
902
903                 if (phase == 0) {
904                         f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
905                                                         META_NAT, true);
906                         continue;
907                 }
908
909                 if (phase == 1) {
910                         f2fs_ra_node_page(sbi, nid);
911                         continue;
912                 }
913
914                 /* phase == 2 */
915                 node_page = f2fs_get_node_page(sbi, nid);
916                 if (IS_ERR(node_page))
917                         continue;
918
919                 /* block may become invalid during f2fs_get_node_page */
920                 if (check_valid_map(sbi, segno, off) == 0) {
921                         f2fs_put_page(node_page, 1);
922                         continue;
923                 }
924
925                 if (f2fs_get_node_info(sbi, nid, &ni)) {
926                         f2fs_put_page(node_page, 1);
927                         continue;
928                 }
929
930                 if (ni.blk_addr != start_addr + off) {
931                         f2fs_put_page(node_page, 1);
932                         continue;
933                 }
934
935                 err = f2fs_move_node_page(node_page, gc_type);
936                 if (!err && gc_type == FG_GC)
937                         submitted++;
938                 stat_inc_node_blk_count(sbi, 1, gc_type);
939         }
940
941         if (++phase < 3)
942                 goto next_step;
943
944         if (fggc)
945                 atomic_dec(&sbi->wb_sync_req[NODE]);
946         return submitted;
947 }
948
949 /*
950  * Calculate the start block index corresponding to the given node offset.
951  * Be careful: the caller must pass only node offsets that refer to direct
952  * node blocks. If an offset that points to any other type of node block,
953  * such as an indirect or double indirect node block, is passed in, it is
954  * a caller bug.
955  */
956 block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
957 {
958         unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
959         unsigned int bidx;
960
961         if (node_ofs == 0)
962                 return 0;
963
964         if (node_ofs <= 2) {
965                 bidx = node_ofs - 1;
966         } else if (node_ofs <= indirect_blks) {
967                 int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
968                 bidx = node_ofs - 2 - dec;
969         } else {
970                 int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
971                 bidx = node_ofs - 5 - dec;
972         }
973         return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
974 }
975
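/*
 * Check that the data block at blkaddr is still referenced by the node
 * recorded in the summary entry; also returns the owning node info and the
 * node offset so the caller can locate the block index within the inode.
 */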
976 static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
977                 struct node_info *dni, block_t blkaddr, unsigned int *nofs)
978 {
979         struct page *node_page;
980         nid_t nid;
981         unsigned int ofs_in_node, max_addrs, base;
982         block_t source_blkaddr;
983
984         nid = le32_to_cpu(sum->nid);
985         ofs_in_node = le16_to_cpu(sum->ofs_in_node);
986
987         node_page = f2fs_get_node_page(sbi, nid);
988         if (IS_ERR(node_page))
989                 return false;
990
991         if (f2fs_get_node_info(sbi, nid, dni)) {
992                 f2fs_put_page(node_page, 1);
993                 return false;
994         }
995
996         if (sum->version != dni->version) {
997                 f2fs_warn(sbi, "%s: valid data with mismatched node version.",
998                           __func__);
999                 set_sbi_flag(sbi, SBI_NEED_FSCK);
1000         }
1001
1002         if (f2fs_check_nid_range(sbi, dni->ino)) {
1003                 f2fs_put_page(node_page, 1);
1004                 return false;
1005         }
1006
1007         if (IS_INODE(node_page)) {
1008                 base = offset_in_addr(F2FS_INODE(node_page));
1009                 max_addrs = DEF_ADDRS_PER_INODE;
1010         } else {
1011                 base = 0;
1012                 max_addrs = DEF_ADDRS_PER_BLOCK;
1013         }
1014
1015         if (base + ofs_in_node >= max_addrs) {
1016                 f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u",
1017                         base, ofs_in_node, max_addrs, dni->ino, dni->nid);
1018                 f2fs_put_page(node_page, 1);
1019                 return false;
1020         }
1021
1022         *nofs = ofs_of_node(node_page);
1023         source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
1024         f2fs_put_page(node_page, 1);
1025
1026         if (source_blkaddr != blkaddr) {
1027 #ifdef CONFIG_F2FS_CHECK_FS
1028                 unsigned int segno = GET_SEGNO(sbi, blkaddr);
1029                 unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
1030
1031                 if (unlikely(check_valid_map(sbi, segno, offset))) {
1032                         if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
1033                                 f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u\n",
1034                                                 blkaddr, source_blkaddr, segno);
1035                                 f2fs_bug_on(sbi, 1);
1036                         }
1037                 }
1038 #endif
1039                 return false;
1040         }
1041         return true;
1042 }
1043
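/*
 * Read ahead one data block of an inode that needs post-read processing
 * (e.g. encryption), reading the on-disk block into the meta inode mapping
 * for a later move.
 */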
1044 static int ra_data_block(struct inode *inode, pgoff_t index)
1045 {
1046         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1047         struct address_space *mapping = inode->i_mapping;
1048         struct dnode_of_data dn;
1049         struct page *page;
1050         struct extent_info ei = {0, 0, 0};
1051         struct f2fs_io_info fio = {
1052                 .sbi = sbi,
1053                 .ino = inode->i_ino,
1054                 .type = DATA,
1055                 .temp = COLD,
1056                 .op = REQ_OP_READ,
1057                 .op_flags = 0,
1058                 .encrypted_page = NULL,
1059                 .in_list = false,
1060                 .retry = false,
1061         };
1062         int err;
1063
1064         page = f2fs_grab_cache_page(mapping, index, true);
1065         if (!page)
1066                 return -ENOMEM;
1067
1068         if (f2fs_lookup_extent_cache(inode, index, &ei)) {
1069                 dn.data_blkaddr = ei.blk + index - ei.fofs;
1070                 if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
1071                                                 DATA_GENERIC_ENHANCE_READ))) {
1072                         err = -EFSCORRUPTED;
1073                         goto put_page;
1074                 }
1075                 goto got_it;
1076         }
1077
1078         set_new_dnode(&dn, inode, NULL, NULL, 0);
1079         err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
1080         if (err)
1081                 goto put_page;
1082         f2fs_put_dnode(&dn);
1083
1084         if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
1085                 err = -ENOENT;
1086                 goto put_page;
1087         }
1088         if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
1089                                                 DATA_GENERIC_ENHANCE))) {
1090                 err = -EFSCORRUPTED;
1091                 goto put_page;
1092         }
1093 got_it:
1094         /* read page */
1095         fio.page = page;
1096         fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
1097
1098         /*
1099          * don't cache encrypted data into the meta inode until the previous
1100          * dirty data has been written back, to avoid racing between GC and flush.
1101          */
1102         f2fs_wait_on_page_writeback(page, DATA, true, true);
1103
1104         f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
1105
1106         fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
1107                                         dn.data_blkaddr,
1108                                         FGP_LOCK | FGP_CREAT, GFP_NOFS);
1109         if (!fio.encrypted_page) {
1110                 err = -ENOMEM;
1111                 goto put_page;
1112         }
1113
1114         err = f2fs_submit_page_bio(&fio);
1115         if (err)
1116                 goto put_encrypted_page;
1117         f2fs_put_page(fio.encrypted_page, 0);
1118         f2fs_put_page(page, 1);
1119
1120         f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
1121         f2fs_update_iostat(sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);
1122
1123         return 0;
1124 put_encrypted_page:
1125         f2fs_put_page(fio.encrypted_page, 1);
1126 put_page:
1127         f2fs_put_page(page, 1);
1128         return err;
1129 }
1130
1131 /*
1132  * Move a data block via META_MAPPING while keeping the data page locked.
1133  * This can be used to move blocks, aka LBAs, directly on disk.
1134  */
1135 static int move_data_block(struct inode *inode, block_t bidx,
1136                                 int gc_type, unsigned int segno, int off)
1137 {
1138         struct f2fs_io_info fio = {
1139                 .sbi = F2FS_I_SB(inode),
1140                 .ino = inode->i_ino,
1141                 .type = DATA,
1142                 .temp = COLD,
1143                 .op = REQ_OP_READ,
1144                 .op_flags = 0,
1145                 .encrypted_page = NULL,
1146                 .in_list = false,
1147                 .retry = false,
1148         };
1149         struct dnode_of_data dn;
1150         struct f2fs_summary sum;
1151         struct node_info ni;
1152         struct page *page, *mpage;
1153         block_t newaddr;
1154         int err = 0;
1155         bool lfs_mode = f2fs_lfs_mode(fio.sbi);
1156         int type = fio.sbi->am.atgc_enabled ?
1157                                 CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;
1158
1159         /* do not read out */
1160         page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
1161         if (!page)
1162                 return -ENOMEM;
1163
1164         if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
1165                 err = -ENOENT;
1166                 goto out;
1167         }
1168
1169         if (f2fs_is_atomic_file(inode)) {
1170                 F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
1171                 F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
1172                 err = -EAGAIN;
1173                 goto out;
1174         }
1175
1176         if (f2fs_is_pinned_file(inode)) {
1177                 if (gc_type == FG_GC)
1178                         f2fs_pin_file_control(inode, true);
1179                 err = -EAGAIN;
1180                 goto out;
1181         }
1182
1183         set_new_dnode(&dn, inode, NULL, NULL, 0);
1184         err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
1185         if (err)
1186                 goto out;
1187
1188         if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
1189                 ClearPageUptodate(page);
1190                 err = -ENOENT;
1191                 goto put_out;
1192         }
1193
1194         /*
1195          * don't cache encrypted data into the meta inode until the previous
1196          * dirty data has been written back, to avoid racing between GC and flush.
1197          */
1198         f2fs_wait_on_page_writeback(page, DATA, true, true);
1199
1200         f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
1201
1202         err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
1203         if (err)
1204                 goto put_out;
1205
1206         set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
1207
1208         /* read page */
1209         fio.page = page;
1210         fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
1211
1212         if (lfs_mode)
1213                 down_write(&fio.sbi->io_order_lock);
1214
1215         mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
1216                                         fio.old_blkaddr, false);
1217         if (!mpage) {
1218                 err = -ENOMEM;
1219                 goto up_out;
1220         }
1221
1222         fio.encrypted_page = mpage;
1223
1224         /* read source block in mpage */
1225         if (!PageUptodate(mpage)) {
1226                 err = f2fs_submit_page_bio(&fio);
1227                 if (err) {
1228                         f2fs_put_page(mpage, 1);
1229                         goto up_out;
1230                 }
1231
1232                 f2fs_update_iostat(fio.sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
1233                 f2fs_update_iostat(fio.sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);
1234
1235                 lock_page(mpage);
1236                 if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
1237                                                 !PageUptodate(mpage))) {
1238                         err = -EIO;
1239                         f2fs_put_page(mpage, 1);
1240                         goto up_out;
1241                 }
1242         }
1243
1244         f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
1245                                 &sum, type, NULL);
1246
1247         fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
1248                                 newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
1249         if (!fio.encrypted_page) {
1250                 err = -ENOMEM;
1251                 f2fs_put_page(mpage, 1);
1252                 goto recover_block;
1253         }
1254
1255         /* write target block */
1256         f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
1257         memcpy(page_address(fio.encrypted_page),
1258                                 page_address(mpage), PAGE_SIZE);
1259         f2fs_put_page(mpage, 1);
1260         invalidate_mapping_pages(META_MAPPING(fio.sbi),
1261                                 fio.old_blkaddr, fio.old_blkaddr);
1262
1263         set_page_dirty(fio.encrypted_page);
1264         if (clear_page_dirty_for_io(fio.encrypted_page))
1265                 dec_page_count(fio.sbi, F2FS_DIRTY_META);
1266
1267         set_page_writeback(fio.encrypted_page);
1268         ClearPageError(page);
1269
1270         /* allocate block address */
1271         f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);
1272
1273         fio.op = REQ_OP_WRITE;
1274         fio.op_flags = REQ_SYNC;
1275         fio.new_blkaddr = newaddr;
1276         f2fs_submit_page_write(&fio);
1277         if (fio.retry) {
1278                 err = -EAGAIN;
1279                 if (PageWriteback(fio.encrypted_page))
1280                         end_page_writeback(fio.encrypted_page);
1281                 goto put_page_out;
1282         }
1283
1284         f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);
1285
1286         f2fs_update_data_blkaddr(&dn, newaddr);
1287         set_inode_flag(inode, FI_APPEND_WRITE);
1288         if (page->index == 0)
1289                 set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
1290 put_page_out:
1291         f2fs_put_page(fio.encrypted_page, 1);
1292 recover_block:
1293         if (err)
1294                 f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
1295                                                         true, true, true);
1296 up_out:
1297         if (lfs_mode)
1298                 up_write(&fio.sbi->io_order_lock);
1299 put_out:
1300         f2fs_put_dnode(&dn);
1301 out:
1302         f2fs_put_page(page, 1);
1303         return err;
1304 }
1305
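/*
 * Migrate one data block through the regular page cache: for background GC
 * just mark the page dirty and cold, for foreground GC issue a synchronous
 * rewrite via f2fs_do_write_data_page().
 */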
1306 static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
1307                                                         unsigned int segno, int off)
1308 {
1309         struct page *page;
1310         int err = 0;
1311
1312         page = f2fs_get_lock_data_page(inode, bidx, true);
1313         if (IS_ERR(page))
1314                 return PTR_ERR(page);
1315
1316         if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
1317                 err = -ENOENT;
1318                 goto out;
1319         }
1320
1321         if (f2fs_is_atomic_file(inode)) {
1322                 F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
1323                 F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
1324                 err = -EAGAIN;
1325                 goto out;
1326         }
1327         if (f2fs_is_pinned_file(inode)) {
1328                 if (gc_type == FG_GC)
1329                         f2fs_pin_file_control(inode, true);
1330                 err = -EAGAIN;
1331                 goto out;
1332         }
1333
1334         if (gc_type == BG_GC) {
1335                 if (PageWriteback(page)) {
1336                         err = -EAGAIN;
1337                         goto out;
1338                 }
1339                 set_page_dirty(page);
1340                 set_cold_data(page);
1341         } else {
1342                 struct f2fs_io_info fio = {
1343                         .sbi = F2FS_I_SB(inode),
1344                         .ino = inode->i_ino,
1345                         .type = DATA,
1346                         .temp = COLD,
1347                         .op = REQ_OP_WRITE,
1348                         .op_flags = REQ_SYNC,
1349                         .old_blkaddr = NULL_ADDR,
1350                         .page = page,
1351                         .encrypted_page = NULL,
1352                         .need_lock = LOCK_REQ,
1353                         .io_type = FS_GC_DATA_IO,
1354                 };
1355                 bool is_dirty = PageDirty(page);
1356
1357 retry:
1358                 f2fs_wait_on_page_writeback(page, DATA, true, true);
1359
1360                 set_page_dirty(page);
1361                 if (clear_page_dirty_for_io(page)) {
1362                         inode_dec_dirty_pages(inode);
1363                         f2fs_remove_dirty_inode(inode);
1364                 }
1365
1366                 set_cold_data(page);
1367
1368                 err = f2fs_do_write_data_page(&fio);
1369                 if (err) {
1370                         clear_cold_data(page);
1371                         if (err == -ENOMEM) {
1372                                 congestion_wait(BLK_RW_ASYNC,
1373                                                 DEFAULT_IO_TIMEOUT);
1374                                 goto retry;
1375                         }
1376                         if (is_dirty)
1377                                 set_page_dirty(page);
1378                 }
1379         }
1380 out:
1381         f2fs_put_page(page, 1);
1382         return err;
1383 }
1384
1385 /*
1386  * This function tries to get the parent node of the victim data block and
1387  * checks the data block's validity. If the block is valid, it is copied with
1388  * cold status and the parent node is updated.
1389  * If the parent node is not valid or the data block address differs,
1390  * the victim data block is ignored.
1391  */
1392 static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
1393                 struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
1394                 bool force_migrate)
1395 {
1396         struct super_block *sb = sbi->sb;
1397         struct f2fs_summary *entry;
1398         block_t start_addr;
1399         int off;
1400         int phase = 0;
1401         int submitted = 0;
1402         unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
1403
1404         start_addr = START_BLOCK(sbi, segno);
1405
1406 next_step:
1407         entry = sum;
1408
1409         for (off = 0; off < usable_blks_in_seg; off++, entry++) {
1410                 struct page *data_page;
1411                 struct inode *inode;
1412                 struct node_info dni; /* dnode info for the data */
1413                 unsigned int ofs_in_node, nofs;
1414                 block_t start_bidx;
1415                 nid_t nid = le32_to_cpu(entry->nid);
1416
1417                 /*
1418                  * stop BG_GC if there are not enough free sections.
1419                  * Also, stop GC if the segment has become fully valid due to
1420                  * a race with SSR block allocation.
1421                  */
1422                 if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
1423                         (!force_migrate && get_valid_blocks(sbi, segno, true) ==
1424                                                         BLKS_PER_SEC(sbi)))
1425                         return submitted;
1426
1427                 if (check_valid_map(sbi, segno, off) == 0)
1428                         continue;
1429
1430                 if (phase == 0) {
1431                         f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
1432                                                         META_NAT, true);
1433                         continue;
1434                 }
1435
1436                 if (phase == 1) {
1437                         f2fs_ra_node_page(sbi, nid);
1438                         continue;
1439                 }
1440
1441                 /* Get the inode by ino, checking its validity */
1442                 if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
1443                         continue;
1444
1445                 if (phase == 2) {
1446                         f2fs_ra_node_page(sbi, dni.ino);
1447                         continue;
1448                 }
1449
1450                 ofs_in_node = le16_to_cpu(entry->ofs_in_node);
1451
1452                 if (phase == 3) {
1453                         inode = f2fs_iget(sb, dni.ino);
1454                         if (IS_ERR(inode) || is_bad_inode(inode) ||
1455                                         special_file(inode->i_mode)) {
1456                                 set_sbi_flag(sbi, SBI_NEED_FSCK);
1457                                 continue;
1458                         }
1459
1460                         if (!down_write_trylock(
1461                                 &F2FS_I(inode)->i_gc_rwsem[WRITE])) {
1462                                 iput(inode);
1463                                 sbi->skipped_gc_rwsem++;
1464                                 continue;
1465                         }
1466
1467                         start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
1468                                                                 ofs_in_node;
1469
1470                         if (f2fs_post_read_required(inode)) {
1471                                 int err = ra_data_block(inode, start_bidx);
1472
1473                                 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1474                                 if (err) {
1475                                         iput(inode);
1476                                         continue;
1477                                 }
1478                                 add_gc_inode(gc_list, inode);
1479                                 continue;
1480                         }
1481
1482                         data_page = f2fs_get_read_data_page(inode,
1483                                                 start_bidx, REQ_RAHEAD, true);
1484                         up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1485                         if (IS_ERR(data_page)) {
1486                                 iput(inode);
1487                                 continue;
1488                         }
1489
1490                         f2fs_put_page(data_page, 0);
1491                         add_gc_inode(gc_list, inode);
1492                         continue;
1493                 }
1494
1495                 /* phase 4: migrate the data of inodes collected in phase 3 */
1496                 inode = find_gc_inode(gc_list, dni.ino);
1497                 if (inode) {
1498                         struct f2fs_inode_info *fi = F2FS_I(inode);
1499                         bool locked = false;
1500                         int err;
1501
1502                         if (S_ISREG(inode->i_mode)) {
1503                                 if (!down_write_trylock(&fi->i_gc_rwsem[READ])) {
1504                                         sbi->skipped_gc_rwsem++;
1505                                         continue;
1506                                 }
1507                                 if (!down_write_trylock(
1508                                                 &fi->i_gc_rwsem[WRITE])) {
1509                                         sbi->skipped_gc_rwsem++;
1510                                         up_write(&fi->i_gc_rwsem[READ]);
1511                                         continue;
1512                                 }
1513                                 locked = true;
1514
1515                                 /* wait for all in-flight direct I/O (AIO) on this inode */
1516                                 inode_dio_wait(inode);
1517                         }
1518
1519                         start_bidx = f2fs_start_bidx_of_node(nofs, inode)
1520                                                                 + ofs_in_node;
1521                         if (f2fs_post_read_required(inode))
1522                                 err = move_data_block(inode, start_bidx,
1523                                                         gc_type, segno, off);
1524                         else
1525                                 err = move_data_page(inode, start_bidx, gc_type,
1526                                                                 segno, off);
1527
1528                         if (!err && (gc_type == FG_GC ||
1529                                         f2fs_post_read_required(inode)))
1530                                 submitted++;
1531
1532                         if (locked) {
1533                                 up_write(&fi->i_gc_rwsem[WRITE]);
1534                                 up_write(&fi->i_gc_rwsem[READ]);
1535                         }
1536
1537                         stat_inc_data_blk_count(sbi, 1, gc_type);
1538                 }
1539         }
1540
1541         if (++phase < 5)
1542                 goto next_step;
1543
1544         return submitted;
1545 }
1546
1547 static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
1548                         int gc_type)
1549 {
1550         struct sit_info *sit_i = SIT_I(sbi);
1551         int ret;
1552
1553         down_write(&sit_i->sentry_lock);
1554         ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
1555                                               NO_CHECK_TYPE, LFS, 0);
1556         up_write(&sit_i->sentry_lock);
1557         return ret;
1558 }
1559
1560 static int do_garbage_collect(struct f2fs_sb_info *sbi,
1561                                 unsigned int start_segno,
1562                                 struct gc_inode_list *gc_list, int gc_type,
1563                                 bool force_migrate)
1564 {
1565         struct page *sum_page;
1566         struct f2fs_summary_block *sum;
1567         struct blk_plug plug;
1568         unsigned int segno = start_segno;
1569         unsigned int end_segno = start_segno + sbi->segs_per_sec;
1570         int seg_freed = 0, migrated = 0;
1571         unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
1572                                                 SUM_TYPE_DATA : SUM_TYPE_NODE;
1573         int submitted = 0;
1574
1575         if (__is_large_section(sbi))
1576                 end_segno = rounddown(end_segno, sbi->segs_per_sec);
1577
1578         /*
1579          * On zoned devices the zone capacity can be smaller than the zone
1580          * size, leaving fewer usable segments in the section than expected,
1581          * so recompute the end segno that can actually be garbage collected.
1582          */
1583         if (f2fs_sb_has_blkzoned(sbi))
1584                 end_segno -= sbi->segs_per_sec -
1585                                         f2fs_usable_segs_in_sec(sbi, segno);
1586
1587         sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);
1588
1589         /* read ahead multiple SSA blocks that have contiguous addresses */
1590         if (__is_large_section(sbi))
1591                 f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
1592                                         end_segno - segno, META_SSA, true);
1593
1594         /* reference all summary pages in the section */
1595         while (segno < end_segno) {
1596                 sum_page = f2fs_get_sum_page(sbi, segno++);
1597                 if (IS_ERR(sum_page)) {
1598                         int err = PTR_ERR(sum_page);
1599
1600                         end_segno = segno - 1;
1601                         for (segno = start_segno; segno < end_segno; segno++) {
1602                                 sum_page = find_get_page(META_MAPPING(sbi),
1603                                                 GET_SUM_BLOCK(sbi, segno));
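                                     /*
                                      * Drop both references: the one just taken
                                      * by find_get_page() and the one taken
                                      * earlier by f2fs_get_sum_page().
                                      */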
1604                                 f2fs_put_page(sum_page, 0);
1605                                 f2fs_put_page(sum_page, 0);
1606                         }
1607                         return err;
1608                 }
1609                 unlock_page(sum_page);
1610         }
1611
1612         blk_start_plug(&plug);
1613
1614         for (segno = start_segno; segno < end_segno; segno++) {
1615
1616                 /* find segment summary of victim */
1617                 sum_page = find_get_page(META_MAPPING(sbi),
1618                                         GET_SUM_BLOCK(sbi, segno));
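                     /*
                      * Drop the reference taken by find_get_page(); the page is
                      * still pinned by the f2fs_get_sum_page() reference taken
                      * above, which is dropped at the 'skip' label below.
                      */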
1619                 f2fs_put_page(sum_page, 0);
1620
1621                 if (get_valid_blocks(sbi, segno, false) == 0)
1622                         goto freed;
1623                 if (gc_type == BG_GC && __is_large_section(sbi) &&
1624                                 migrated >= sbi->migration_granularity)
1625                         goto skip;
1626                 if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
1627                         goto skip;
1628
1629                 sum = page_address(sum_page);
1630                 if (type != GET_SUM_TYPE((&sum->footer))) {
1631                         f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
1632                                  segno, type, GET_SUM_TYPE((&sum->footer)));
1633                         set_sbi_flag(sbi, SBI_NEED_FSCK);
1634                         f2fs_stop_checkpoint(sbi, false);
1635                         goto skip;
1636                 }
1637
1638                 /*
1639                  * The sum_page is kept unlocked here to avoid this ABBA deadlock:
1640                  * - lock_page(sum_page)         - f2fs_replace_block
1641                  *  - check_valid_map()            - down_write(sentry_lock)
1642                  *   - down_read(sentry_lock)     - change_curseg()
1643                  *                                  - lock_page(sum_page)
1644                  */
1645                 if (type == SUM_TYPE_NODE)
1646                         submitted += gc_node_segment(sbi, sum->entries, segno,
1647                                                                 gc_type);
1648                 else
1649                         submitted += gc_data_segment(sbi, sum->entries, gc_list,
1650                                                         segno, gc_type,
1651                                                         force_migrate);
1652
1653                 stat_inc_seg_count(sbi, type, gc_type);
1654                 migrated++;
1655
1656 freed:
1657                 if (gc_type == FG_GC &&
1658                                 get_valid_blocks(sbi, segno, false) == 0)
1659                         seg_freed++;
1660
1661                 if (__is_large_section(sbi))
1662                         sbi->next_victim_seg[gc_type] =
1663                                 (segno + 1 < end_segno) ? segno + 1 : NULL_SEGNO;
1664 skip:
1665                 f2fs_put_page(sum_page, 0);
1666         }
1667
1668         if (submitted)
1669                 f2fs_submit_merged_write(sbi,
1670                                 (type == SUM_TYPE_NODE) ? NODE : DATA);
1671
1672         blk_finish_plug(&plug);
1673
1674         stat_inc_call_count(sbi->stat_info);
1675
1676         return seg_freed;
1677 }
1678
1679 int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
1680                         bool background, bool force, unsigned int segno)
1681 {
1682         int gc_type = sync ? FG_GC : BG_GC;
1683         int sec_freed = 0, seg_freed = 0, total_freed = 0;
1684         int ret = 0;
1685         struct cp_control cpc;
1686         unsigned int init_segno = segno;
1687         struct gc_inode_list gc_list = {
1688                 .ilist = LIST_HEAD_INIT(gc_list.ilist),
1689                 .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
1690         };
1691         unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
1692         unsigned long long first_skipped;
1693         unsigned int skipped_round = 0, round = 0;
1694
1695         trace_f2fs_gc_begin(sbi->sb, sync, background,
1696                                 get_pages(sbi, F2FS_DIRTY_NODES),
1697                                 get_pages(sbi, F2FS_DIRTY_DENTS),
1698                                 get_pages(sbi, F2FS_DIRTY_IMETA),
1699                                 free_sections(sbi),
1700                                 free_segments(sbi),
1701                                 reserved_segments(sbi),
1702                                 prefree_segments(sbi));
1703
1704         cpc.reason = __get_cp_reason(sbi);
1705         sbi->skipped_gc_rwsem = 0;
1706         first_skipped = last_skipped;
1707 gc_more:
1708         if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
1709                 ret = -EINVAL;
1710                 goto stop;
1711         }
1712         if (unlikely(f2fs_cp_error(sbi))) {
1713                 ret = -EIO;
1714                 goto stop;
1715         }
1716
1717         if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
1718                 /*
1719                  * For example, if there are many prefree segments below the
1720                  * given threshold, a checkpoint can turn them into free
1721                  * segments, so foreground GC is no longer needed.
1722                  */
1723                 if (prefree_segments(sbi) &&
1724                                 !is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
1725                         ret = f2fs_write_checkpoint(sbi, &cpc);
1726                         if (ret)
1727                                 goto stop;
1728                 }
1729                 if (has_not_enough_free_secs(sbi, 0, 0))
1730                         gc_type = FG_GC;
1731         }
1732
1733         /* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
1734         if (gc_type == BG_GC && !background) {
1735                 ret = -EINVAL;
1736                 goto stop;
1737         }
1738         ret = __get_victim(sbi, &segno, gc_type);
1739         if (ret)
1740                 goto stop;
1741
1742         seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type, force);
1743         if (gc_type == FG_GC &&
1744                 seg_freed == f2fs_usable_segs_in_sec(sbi, segno))
1745                 sec_freed++;
1746         total_freed += seg_freed;
1747
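             /*
              * Count FG_GC rounds in which progress was blocked by atomic-write
              * files or i_gc_rwsem contention; the counters decide below whether
              * to keep retrying or to drop in-memory pages first.
              */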
1748         if (gc_type == FG_GC) {
1749                 if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
1750                                                 sbi->skipped_gc_rwsem)
1751                         skipped_round++;
1752                 last_skipped = sbi->skipped_atomic_files[FG_GC];
1753                 round++;
1754         }
1755
1756         if (gc_type == FG_GC && seg_freed)
1757                 sbi->cur_victim_sec = NULL_SEGNO;
1758
1759         if (sync)
1760                 goto stop;
1761
1762         if (!has_not_enough_free_secs(sbi, sec_freed, 0))
1763                 goto stop;
1764
1765         if (skipped_round <= MAX_SKIP_GC_COUNT || skipped_round * 2 < round) {
1766
1767                 /* Write checkpoint to reclaim prefree segments */
1768                 if (free_sections(sbi) < NR_CURSEG_PERSIST_TYPE &&
1769                                 prefree_segments(sbi) &&
1770                                 !is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
1771                         ret = f2fs_write_checkpoint(sbi, &cpc);
1772                         if (ret)
1773                                 goto stop;
1774                 }
1775                 segno = NULL_SEGNO;
1776                 goto gc_more;
1777         }
1778         if (first_skipped < last_skipped &&
1779                         (last_skipped - first_skipped) >
1780                                         sbi->skipped_gc_rwsem) {
1781                 f2fs_drop_inmem_pages_all(sbi, true);
1782                 segno = NULL_SEGNO;
1783                 goto gc_more;
1784         }
1785         if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED))
1786                 ret = f2fs_write_checkpoint(sbi, &cpc);
1787 stop:
1788         SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
1789         SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;
1790
1791         trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
1792                                 get_pages(sbi, F2FS_DIRTY_NODES),
1793                                 get_pages(sbi, F2FS_DIRTY_DENTS),
1794                                 get_pages(sbi, F2FS_DIRTY_IMETA),
1795                                 free_sections(sbi),
1796                                 free_segments(sbi),
1797                                 reserved_segments(sbi),
1798                                 prefree_segments(sbi));
1799
1800         up_write(&sbi->gc_lock);
1801
1802         put_gc_inode(&gc_list);
1803
1804         if (sync && !ret)
1805                 ret = sec_freed ? 0 : -EAGAIN;
1806         return ret;
1807 }
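
     /*
      * Note: f2fs_gc() expects sbi->gc_lock to be held for write on entry and
      * releases it itself before returning (see the up_write() above), so the
      * caller must not unlock it again.  A minimal sketch of a hypothetical
      * caller; the real call sites, such as the background GC thread and
      * f2fs_balance_fs(), live elsewhere in f2fs:
      *
      *        if (down_write_trylock(&sbi->gc_lock))
      *                err = f2fs_gc(sbi, false, true, false, NULL_SEGNO);
      *
      * with the arguments meaning sync=false, background=true, force=false and
      * no preferred victim segment (NULL_SEGNO).
      */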
1808
1809 int __init f2fs_create_garbage_collection_cache(void)
1810 {
1811         victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
1812                                         sizeof(struct victim_entry));
1813         if (!victim_entry_slab)
1814                 return -ENOMEM;
1815         return 0;
1816 }
1817
1818 void f2fs_destroy_garbage_collection_cache(void)
1819 {
1820         kmem_cache_destroy(victim_entry_slab);
1821 }
1822
1823 static void init_atgc_management(struct f2fs_sb_info *sbi)
1824 {
1825         struct atgc_management *am = &sbi->am;
1826
1827         if (test_opt(sbi, ATGC) &&
1828                 SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
1829                 am->atgc_enabled = true;
1830
1831         am->root = RB_ROOT_CACHED;
1832         INIT_LIST_HEAD(&am->victim_list);
1833         am->victim_count = 0;
1834
1835         am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
1836         am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
1837         am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
1838         am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
1839 }
1840
1841 void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
1842 {
1843         DIRTY_I(sbi)->v_ops = &default_v_ops;
1844
1845         sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;
1846
1847         /* serve the warm/cold data area from the slower device */
1848         if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
1849                 SIT_I(sbi)->last_victim[ALLOC_NEXT] =
1850                                 GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
1851
1852         init_atgc_management(sbi);
1853 }
1854
1855 static int free_segment_range(struct f2fs_sb_info *sbi,
1856                                 unsigned int secs, bool gc_only)
1857 {
1858         unsigned int segno, next_inuse, start, end;
1859         struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
1860         int gc_mode, gc_type;
1861         int err = 0;
1862         int type;
1863
1864         /* Shrink MAIN_SECS so block allocation for GC lands below the removed range */
1865         MAIN_SECS(sbi) -= secs;
1866         start = MAIN_SECS(sbi) * sbi->segs_per_sec;
1867         end = MAIN_SEGS(sbi) - 1;
1868
1869         mutex_lock(&DIRTY_I(sbi)->seglist_lock);
1870         for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
1871                 if (SIT_I(sbi)->last_victim[gc_mode] >= start)
1872                         SIT_I(sbi)->last_victim[gc_mode] = 0;
1873
1874         for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
1875                 if (sbi->next_victim_seg[gc_type] >= start)
1876                         sbi->next_victim_seg[gc_type] = NULL_SEGNO;
1877         mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
1878
1879         /* Move out cursegs from the target range */
1880         for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++)
1881                 f2fs_allocate_segment_for_resize(sbi, type, start, end);
1882
1883         /* do GC to move out valid blocks in the range */
1884         for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
1885                 struct gc_inode_list gc_list = {
1886                         .ilist = LIST_HEAD_INIT(gc_list.ilist),
1887                         .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
1888                 };
1889
1890                 do_garbage_collect(sbi, segno, &gc_list, FG_GC, true);
1891                 put_gc_inode(&gc_list);
1892
1893                 if (!gc_only && get_valid_blocks(sbi, segno, true)) {
1894                         err = -EAGAIN;
1895                         goto out;
1896                 }
1897                 if (fatal_signal_pending(current)) {
1898                         err = -ERESTARTSYS;
1899                         goto out;
1900                 }
1901         }
1902         if (gc_only)
1903                 goto out;
1904
1905         err = f2fs_write_checkpoint(sbi, &cpc);
1906         if (err)
1907                 goto out;
1908
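             /* after the checkpoint, every segment in [start, end] must be free */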
1909         next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
1910         if (next_inuse <= end) {
1911                 f2fs_err(sbi, "segno %u should be free but still inuse!",
1912                          next_inuse);
1913                 f2fs_bug_on(sbi, 1);
1914         }
1915 out:
1916         MAIN_SECS(sbi) += secs;
1917         return err;
1918 }
1919
1920 static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
1921 {
1922         struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
1923         int section_count;
1924         int segment_count;
1925         int segment_count_main;
1926         long long block_count;
1927         int segs = secs * sbi->segs_per_sec;
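             /*
              * A section is segs_per_sec segments and a segment is
              * blocks_per_seg blocks.  For example, with the common default
              * geometry of one 2 MiB segment (512 4 KiB blocks) per section,
              * growing by one section adds 512 blocks to block_count.
              */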
1928
1929         down_write(&sbi->sb_lock);
1930
1931         section_count = le32_to_cpu(raw_sb->section_count);
1932         segment_count = le32_to_cpu(raw_sb->segment_count);
1933         segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
1934         block_count = le64_to_cpu(raw_sb->block_count);
1935
1936         raw_sb->section_count = cpu_to_le32(section_count + secs);
1937         raw_sb->segment_count = cpu_to_le32(segment_count + segs);
1938         raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
1939         raw_sb->block_count = cpu_to_le64(block_count +
1940                                         (long long)segs * sbi->blocks_per_seg);
1941         if (f2fs_is_multi_device(sbi)) {
1942                 int last_dev = sbi->s_ndevs - 1;
1943                 int dev_segs =
1944                         le32_to_cpu(raw_sb->devs[last_dev].total_segments);
1945
1946                 raw_sb->devs[last_dev].total_segments =
1947                                                 cpu_to_le32(dev_segs + segs);
1948         }
1949
1950         up_write(&sbi->sb_lock);
1951 }
1952
1953 static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
1954 {
1955         int segs = secs * sbi->segs_per_sec;
1956         long long blks = (long long)segs * sbi->blocks_per_seg;
1957         long long user_block_count =
1958                                 le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);
1959
1960         SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
1961         MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
1962         MAIN_SECS(sbi) += secs;
1963         FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
1964         FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
1965         F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);
1966
1967         if (f2fs_is_multi_device(sbi)) {
1968                 int last_dev = sbi->s_ndevs - 1;
1969
1970                 FDEV(last_dev).total_segments =
1971                                 (int)FDEV(last_dev).total_segments + segs;
1972                 FDEV(last_dev).end_blk =
1973                                 (long long)FDEV(last_dev).end_blk + blks;
1974 #ifdef CONFIG_BLK_DEV_ZONED
1975                 FDEV(last_dev).nr_blkz = (int)FDEV(last_dev).nr_blkz +
1976                                         (int)(blks >> sbi->log_blocks_per_blkz);
1977 #endif
1978         }
1979 }
1980
1981 int f2fs_resize_fs(struct file *filp, __u64 block_count)
1982 {
1983         struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
1984         __u64 old_block_count, shrunk_blocks;
1985         struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
1986         unsigned int secs;
1987         int err = 0;
1988         __u32 rem;
1989
1990         old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
1991         if (block_count > old_block_count)
1992                 return -EINVAL;
1993
1994         if (f2fs_is_multi_device(sbi)) {
1995                 int last_dev = sbi->s_ndevs - 1;
1996                 __u64 last_segs = FDEV(last_dev).total_segments;
1997
1998                 if (block_count + last_segs * sbi->blocks_per_seg <=
1999                                                                 old_block_count)
2000                         return -EINVAL;
2001         }
2002
2003         /* the new fs size must be aligned to the section size */
2004         div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
2005         if (rem)
2006                 return -EINVAL;
2007
2008         if (block_count == old_block_count)
2009                 return 0;
2010
2011         if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
2012                 f2fs_err(sbi, "Should run fsck to repair first.");
2013                 return -EFSCORRUPTED;
2014         }
2015
2016         if (test_opt(sbi, DISABLE_CHECKPOINT)) {
2017                 f2fs_err(sbi, "Checkpoint should be enabled.");
2018                 return -EINVAL;
2019         }
2020
2021         err = mnt_want_write_file(filp);
2022         if (err)
2023                 return err;
2024
2025         shrunk_blocks = old_block_count - block_count;
2026         secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));
2027
2028         /* stop other GC */
2029         if (!down_write_trylock(&sbi->gc_lock)) {
2030                 err = -EAGAIN;
2031                 goto out_drop_write;
2032         }
2033
2034         /* stop CP to protect MAIN_SECS in free_segment_range */
2035         f2fs_lock_op(sbi);
2036
2037         spin_lock(&sbi->stat_lock);
2038         if (shrunk_blocks + valid_user_blocks(sbi) +
2039                 sbi->current_reserved_blocks + sbi->unusable_block_count +
2040                 F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
2041                 err = -ENOSPC;
2042         spin_unlock(&sbi->stat_lock);
2043
2044         if (err)
2045                 goto out_unlock;
2046
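             /*
              * First pass: with gc_only == true, free_segment_range() only
              * migrates valid blocks out of the target range under gc_lock and
              * f2fs_lock_op(); the actual shrink is redone below on the frozen
              * filesystem.
              */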
2047         err = free_segment_range(sbi, secs, true);
2048
2049 out_unlock:
2050         f2fs_unlock_op(sbi);
2051         up_write(&sbi->gc_lock);
2052 out_drop_write:
2053         mnt_drop_write_file(filp);
2054         if (err)
2055                 return err;
2056
2057         freeze_super(sbi->sb);
2058
2059         if (f2fs_readonly(sbi->sb)) {
2060                 thaw_super(sbi->sb);
2061                 return -EROFS;
2062         }
2063
2064         down_write(&sbi->gc_lock);
2065         mutex_lock(&sbi->cp_mutex);
2066
2067         spin_lock(&sbi->stat_lock);
2068         if (shrunk_blocks + valid_user_blocks(sbi) +
2069                 sbi->current_reserved_blocks + sbi->unusable_block_count +
2070                 F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
2071                 err = -ENOSPC;
2072         else
2073                 sbi->user_block_count -= shrunk_blocks;
2074         spin_unlock(&sbi->stat_lock);
2075         if (err)
2076                 goto out_err;
2077
2078         set_sbi_flag(sbi, SBI_IS_RESIZEFS);
2079         err = free_segment_range(sbi, secs, false);
2080         if (err)
2081                 goto recover_out;
2082
2083         update_sb_metadata(sbi, -secs);
2084
2085         err = f2fs_commit_super(sbi, false);
2086         if (err) {
2087                 update_sb_metadata(sbi, secs);
2088                 goto recover_out;
2089         }
2090
2091         update_fs_metadata(sbi, -secs);
2092         clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
2093         set_sbi_flag(sbi, SBI_IS_DIRTY);
2094
2095         err = f2fs_write_checkpoint(sbi, &cpc);
2096         if (err) {
2097                 update_fs_metadata(sbi, secs);
2098                 update_sb_metadata(sbi, secs);
2099                 f2fs_commit_super(sbi, false);
2100         }
2101 recover_out:
2102         clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
2103         if (err) {
2104                 set_sbi_flag(sbi, SBI_NEED_FSCK);
2105                 f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");
2106
2107                 spin_lock(&sbi->stat_lock);
2108                 sbi->user_block_count += shrunk_blocks;
2109                 spin_unlock(&sbi->stat_lock);
2110         }
2111 out_err:
2112         mutex_unlock(&sbi->cp_mutex);
2113         up_write(&sbi->gc_lock);
2114         thaw_super(sbi->sb);
2115         return err;
2116 }
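
     /*
      * f2fs_resize_fs() is reached from userspace through the F2FS_IOC_RESIZE_FS
      * ioctl.  A rough sketch of the expected handler; the real handler lives in
      * fs/f2fs/file.c and also checks CAP_SYS_ADMIN:
      *
      *        static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
      *        {
      *                __u64 block_count;
      *
      *                if (copy_from_user(&block_count, (__u64 __user *)arg,
      *                                   sizeof(block_count)))
      *                        return -EFAULT;
      *                return f2fs_resize_fs(filp, block_count);
      *        }
      */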