GNU Linux-libre 6.1.86-gnu
fs/f2fs/shrinker.c
// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs shrinker support
 *   the basic infra was copied from fs/ubifs/shrinker.c
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"

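/*
 * Mounted f2fs instances register themselves on f2fs_list (see
 * f2fs_join_shrinker() below); the list is protected by f2fs_list_lock.
 * shrinker_run_no stamps each reclaim pass so that a single pass does
 * not scan the same filesystem twice.
 */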
static LIST_HEAD(f2fs_list);
static DEFINE_SPINLOCK(f2fs_list_lock);
static unsigned int shrinker_run_no;

static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
{
        return NM_I(sbi)->nat_cnt[RECLAIMABLE_NAT];
}

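/* only free nids beyond the MAX_FREE_NIDS pool are worth reclaiming */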
static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
{
        long count = NM_I(sbi)->nid_cnt[FREE_NID] - MAX_FREE_NIDS;

        return count > 0 ? count : 0;
}

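/* zombie extent trees plus all cached extent nodes of the given type */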
static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi,
                                        enum extent_type type)
{
        struct extent_tree_info *eti = &sbi->extent_tree[type];

        return atomic_read(&eti->total_zombie_tree) +
                                atomic_read(&eti->total_ext_node);
}

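/*
 * Report how many objects are currently reclaimable: read extent cache
 * entries, clean NAT entries and surplus free nids, summed over every
 * registered filesystem.  Instances in the middle of f2fs_put_super()
 * are skipped via the umount_mutex trylock.
 */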
unsigned long f2fs_shrink_count(struct shrinker *shrink,
                                struct shrink_control *sc)
{
        struct f2fs_sb_info *sbi;
        struct list_head *p;
        unsigned long count = 0;

        spin_lock(&f2fs_list_lock);
        p = f2fs_list.next;
        while (p != &f2fs_list) {
                sbi = list_entry(p, struct f2fs_sb_info, s_list);

                /* stop f2fs_put_super */
                if (!mutex_trylock(&sbi->umount_mutex)) {
                        p = p->next;
                        continue;
                }
                spin_unlock(&f2fs_list_lock);

                /* count read extent cache entries */
                count += __count_extent_cache(sbi, EX_READ);

                /* count clean nat cache entries */
                count += __count_nat_entries(sbi);

                /* count free nids cache entries */
                count += __count_free_nids(sbi);

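                /* holding umount_mutex keeps sbi on the list, so p stays valid */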
                spin_lock(&f2fs_list_lock);
                p = p->next;
                mutex_unlock(&sbi->umount_mutex);
        }
        spin_unlock(&f2fs_list_lock);
        return count;
}

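/*
 * Try to free up to sc->nr_to_scan objects, handing half of the budget
 * to the read extent cache shrinker and the remainder to clean NAT and
 * surplus free nid reclaim.  Each scanned filesystem is stamped with
 * the current run number and rotated to the list tail, so the scan
 * stops once it meets a filesystem already visited in this pass.
 */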
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
                                struct shrink_control *sc)
{
        unsigned long nr = sc->nr_to_scan;
        struct f2fs_sb_info *sbi;
        struct list_head *p;
        unsigned int run_no;
        unsigned long freed = 0;

        spin_lock(&f2fs_list_lock);
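        /*
         * Pick a non-zero run number: a freshly mounted sbi still has
         * shrinker_run_no == 0 and must not look as if it had already
         * been scanned in this pass.
         */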
        do {
                run_no = ++shrinker_run_no;
        } while (run_no == 0);
        p = f2fs_list.next;
        while (p != &f2fs_list) {
                sbi = list_entry(p, struct f2fs_sb_info, s_list);

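                /* already stamped in this pass, so stop instead of rescanning */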
                if (sbi->shrinker_run_no == run_no)
                        break;

                /* stop f2fs_put_super */
                if (!mutex_trylock(&sbi->umount_mutex)) {
                        p = p->next;
                        continue;
                }
                spin_unlock(&f2fs_list_lock);

                sbi->shrinker_run_no = run_no;

                /* shrink read extent cache entries */
                freed += f2fs_shrink_read_extent_tree(sbi, nr >> 1);

                /* shrink clean nat cache entries */
                if (freed < nr)
                        freed += f2fs_try_to_free_nats(sbi, nr - freed);

                /* shrink free nids cache entries */
                if (freed < nr)
                        freed += f2fs_try_to_free_nids(sbi, nr - freed);

                spin_lock(&f2fs_list_lock);
                p = p->next;
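                /* rotate the just-scanned sbi to the tail so later passes start elsewhere */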
                list_move_tail(&sbi->s_list, &f2fs_list);
                mutex_unlock(&sbi->umount_mutex);
                if (freed >= nr)
                        break;
        }
        spin_unlock(&f2fs_list_lock);
        return freed;
}

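/* register sbi on the global list so the shrinker above can reach it */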
void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
{
        spin_lock(&f2fs_list_lock);
        list_add_tail(&sbi->s_list, &f2fs_list);
        spin_unlock(&f2fs_list_lock);
}

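/*
 * Unregister sbi at unmount: shrink away whatever the read extent cache
 * still holds, then take the filesystem off the global list.
 */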
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
{
        f2fs_shrink_read_extent_tree(sbi, __count_extent_cache(sbi, EX_READ));

        spin_lock(&f2fs_list_lock);
        list_del_init(&sbi->s_list);
        spin_unlock(&f2fs_list_lock);
}