GNU Linux-libre 4.9.308-gnu1
[releases.git] / fs / minix / itree_common.c
1 /* Generic part */
2
/*
 * One step of a lookup path through the inode's block tree.
 *   p   -- location of the pointer to the next level (either inside
 *          i_data(inode) or inside an indirect block's buffer)
 *   key -- cached copy of *p; verify_chain() checks it still matches
 *   bh  -- buffer holding the indirect block *p lives in, or NULL when
 *          p points into the in-core inode (the first level)
 *
 * NOTE(review): block_t, DEPTH and DIRECT are supplied by the file that
 * includes this "generic part" -- presumably the V1/V2 tree variants;
 * confirm against the including files.
 */
typedef struct {
	block_t *p;
	block_t key;
	struct buffer_head *bh;
} Indirect;

/* Serializes tree-pointer updates (splice/truncate) against chain walkers */
static DEFINE_RWLOCK(pointers_lock);
10
11 static inline void add_chain(Indirect *p, struct buffer_head *bh, block_t *v)
12 {
13         p->key = *(p->p = v);
14         p->bh = bh;
15 }
16
17 static inline int verify_chain(Indirect *from, Indirect *to)
18 {
19         while (from <= to && from->key == *from->p)
20                 from++;
21         return (from > to);
22 }
23
24 static inline block_t *block_end(struct buffer_head *bh)
25 {
26         return (block_t *)((char*)bh->b_data + bh->b_size);
27 }
28
/*
 * Walk the indirect chain for a file block.
 *
 * Follows up to @depth pointers, starting in the in-core inode's block
 * array and descending through on-disk indirect blocks, recording each
 * step in @chain.  Returns NULL when the whole chain resolved (the
 * target block number is then chain[depth-1].key), otherwise a pointer
 * to the first element whose key was zero or whose block could not be
 * read.
 *
 * *err: 0 on success or a merely-incomplete chain, -EIO if an indirect
 * block failed to read, -EAGAIN if a concurrent pointer update (e.g.
 * truncate) invalidated the chain while we slept in sb_bread() -- the
 * caller is expected to retry.
 *
 * Buffers referenced by the returned chain are held; the caller must
 * brelse() them (see get_block()'s cleanup path).
 */
static inline Indirect *get_branch(struct inode *inode,
					int depth,
					int *offsets,
					Indirect chain[DEPTH],
					int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain (chain, NULL, i_data(inode) + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, block_to_cpu(p->key));
		if (!bh)
			goto failure;
		/* exclude writers while we re-check and extend the chain */
		read_lock(&pointers_lock);
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (block_t *)bh->b_data + *++offsets);
		read_unlock(&pointers_lock);
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	read_unlock(&pointers_lock);
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}
68
/*
 * Allocate the @num missing levels of a branch.
 *
 * branch[0].p/bh were already set up by get_branch() to address the
 * splice point; here only its key is filled in.  Levels 1..num-1 get a
 * freshly allocated, zeroed indirect block each, linked on disk to its
 * child before the parent link is written -- so the branch is fully
 * consistent while still invisible (splice_branch() publishes it).
 *
 * Returns 0 when all @num blocks were obtained.  On failure all blocks
 * allocated so far are released again and -ENOSPC (out of free blocks)
 * or -ENOMEM (sb_getblk failed) is returned.
 */
static int alloc_branch(struct inode *inode,
			     int num,
			     int *offsets,
			     Indirect *branch)
{
	int n = 0;
	int i;
	int parent = minix_new_block(inode);
	int err = -ENOSPC;

	branch[0].key = cpu_to_block(parent);
	if (parent) for (n = 1; n < num; n++) {
		struct buffer_head *bh;
		/* Allocate the next block */
		int nr = minix_new_block(inode);
		if (!nr)
			break;
		branch[n].key = cpu_to_block(nr);
		bh = sb_getblk(inode->i_sb, parent);
		if (!bh) {
			minix_free_block(inode, nr);
			err = -ENOMEM;
			break;
		}
		lock_buffer(bh);
		memset(bh->b_data, 0, bh->b_size);
		branch[n].bh = bh;
		branch[n].p = (block_t*) bh->b_data + offsets[n];
		/* link the new child into its (not yet reachable) parent */
		*branch[n].p = branch[n].key;
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mark_buffer_dirty_inode(bh, inode);
		parent = nr;
	}
	if (n == num)
		return 0;

	/* Allocation failed, free what we already allocated */
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < n; i++)
		minix_free_block(inode, block_to_cpu(branch[i].key));
	return err;
}
113
/*
 * Publish a branch built by alloc_branch() at splice point @where.
 * @chain is the full lookup path (used to re-verify reachability) and
 * @num the branch length.
 *
 * The re-check and the single pointer store happen under the write side
 * of pointers_lock, so chain walkers see either the old empty slot or
 * the complete branch -- never a half-linked one.  Returns 0 on
 * success.  If the race was lost (path gone or slot no longer vacant),
 * every block of the orphaned branch is freed and -EAGAIN tells the
 * caller to retry the whole lookup.
 */
static inline int splice_branch(struct inode *inode,
				     Indirect chain[DEPTH],
				     Indirect *where,
				     int num)
{
	int i;

	write_lock(&pointers_lock);

	/* Verify that place we are splicing to is still there and vacant */
	if (!verify_chain(chain, where-1) || *where->p)
		goto changed;

	/* the one store that makes the branch reachable */
	*where->p = where->key;

	write_unlock(&pointers_lock);

	/* We are done with atomic stuff, now do the rest of housekeeping */

	inode->i_ctime = current_time(inode);

	/* had we spliced it onto indirect block? */
	if (where->bh)
		mark_buffer_dirty_inode(where->bh, inode);

	mark_inode_dirty(inode);
	return 0;

changed:
	write_unlock(&pointers_lock);
	for (i = 1; i < num; i++)
		bforget(where[i].bh);
	for (i = 0; i < num; i++)
		minix_free_block(inode, block_to_cpu(where[i].key));
	return -EAGAIN;
}
150
/*
 * Map logical @block of @inode to a disk block in @bh.
 *
 * Turns the block number into a path of per-level offsets, resolves it
 * with get_branch(), and -- when @create is set and the chain is
 * incomplete -- allocates the missing tail (alloc_branch) and publishes
 * it (splice_branch).  Returns 0 with @bh mapped (and marked new if
 * just allocated) or a negative errno.  -EAGAIN from a racing pointer
 * update is handled internally by dropping the chain and retrying.
 */
static inline int get_block(struct inode * inode, sector_t block,
			struct buffer_head *bh, int create)
{
	int err = -EIO;
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	int left;
	int depth = block_to_path(inode, block, offsets);

	if (depth == 0)
		goto out;

reread:
	partial = get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
got_it:
		map_bh(bh, inode->i_sb, block_to_cpu(chain[depth-1].key));
		/* Clean up and exit */
		partial = chain+depth-1; /* the whole chain */
		goto cleanup;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO) {
cleanup:
		/* release every buffer the chain up to 'partial' holds */
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
out:
		return err;
	}

	/*
	 * Indirect block might be removed by truncate while we were
	 * reading it. Handling of that case (forget what we've got and
	 * reread) is taken out of the main path.
	 */
	if (err == -EAGAIN)
		goto changed;

	/* allocate the missing part of the chain and try to attach it */
	left = (chain + depth) - partial;
	err = alloc_branch(inode, left, offsets+(partial-chain), partial);
	if (err)
		goto cleanup;

	if (splice_branch(inode, chain, partial, left) < 0)
		goto changed;

	set_buffer_new(bh);
	goto got_it;

changed:
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
	goto reread;
}
213
214 static inline int all_zeroes(block_t *p, block_t *q)
215 {
216         while (p < q)
217                 if (*p++)
218                         return 0;
219         return 1;
220 }
221
/*
 * Find the "shared" part of the truncation path: the deepest point
 * whose indirect blocks must survive because entries before the cut
 * are still in use.
 *
 * On return *top holds the block number of a subtree detached at an
 * interior level (0 if nothing was detached there) and the returned
 * chain position marks where the shared branch ends; buffers held for
 * deeper levels have already been released.  The actual detach --
 * clearing the on-disk pointer -- is done under pointers_lock so that
 * racing get_branch() walkers notice via verify_chain().
 */
static Indirect *find_shared(struct inode *inode,
				int depth,
				int offsets[DEPTH],
				Indirect chain[DEPTH],
				block_t *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* trailing zero offsets: whole subtrees below them die anyway */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = get_branch(inode, k, offsets, chain, &err);

	write_lock(&pointers_lock);
	if (!partial)
		partial = chain + k-1;
	/*
	 * A key appeared at the cut point while we slept in get_branch()
	 * (concurrent allocation) -- nothing to detach here.
	 */
	if (!partial->key && *partial->p) {
		write_unlock(&pointers_lock);
		goto no_top;
	}
	/* back off over levels whose earlier entries are all zero */
	for (p=partial;p>chain && all_zeroes((block_t*)p->bh->b_data,p->p);p--)
		;
	/*
	 * If the whole resolved path is freeable, keep the top attached
	 * and widen the caller's clearing range by one entry; otherwise
	 * detach the subtree rooted at *p->p and return it via *top.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		*p->p = 0;
	}
	write_unlock(&pointers_lock);

	while(partial > p)
	{
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
261
262 static inline void free_data(struct inode *inode, block_t *p, block_t *q)
263 {
264         unsigned long nr;
265
266         for ( ; p < q ; p++) {
267                 nr = block_to_cpu(*p);
268                 if (nr) {
269                         *p = 0;
270                         minix_free_block(inode, nr);
271                 }
272         }
273 }
274
275 static void free_branches(struct inode *inode, block_t *p, block_t *q, int depth)
276 {
277         struct buffer_head * bh;
278         unsigned long nr;
279
280         if (depth--) {
281                 for ( ; p < q ; p++) {
282                         nr = block_to_cpu(*p);
283                         if (!nr)
284                                 continue;
285                         *p = 0;
286                         bh = sb_bread(inode->i_sb, nr);
287                         if (!bh)
288                                 continue;
289                         free_branches(inode, (block_t*)bh->b_data,
290                                       block_end(bh), depth);
291                         bforget(bh);
292                         minix_free_block(inode, nr);
293                         mark_inode_dirty(inode);
294                 }
295         } else
296                 free_data(inode, p, q);
297 }
298
/*
 * Cut the inode's block tree down to i_size.
 *
 * Zeroes the tail of the last partial page, then frees everything past
 * the new end of file: the branch below the cut point, the tails of
 * the indirect blocks on the shared branch, and finally every wholly
 * out-of-range indirect subtree hanging off the inode itself.  The
 * detach of the freeable branch is done by find_shared() under
 * pointers_lock so concurrent chain walkers stay consistent.
 */
static inline void truncate (struct inode * inode)
{
	struct super_block *sb = inode->i_sb;
	block_t *idata = i_data(inode);
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	block_t nr = 0;
	int n;
	int first_whole;
	long iblock;

	/* first block past the new EOF (size rounded up to a full block) */
	iblock = (inode->i_size + sb->s_blocksize -1) >> sb->s_blocksize_bits;
	block_truncate_page(inode->i_mapping, inode->i_size, get_block);

	n = block_to_path(inode, iblock, offsets);
	if (!n)
		return;

	if (n == 1) {
		/* EOF lies among the direct blocks: free the rest of them */
		free_data(inode, idata+offsets[0], idata + DIRECT);
		first_whole = 0;
		goto do_indirects;
	}

	/* index of the first indirect subtree that dies completely */
	first_whole = offsets[0] + 1 - DIRECT;
	partial = find_shared(inode, n, offsets, chain, &nr);
	if (nr) {
		/* detached subtree root: dirty its former parent, free it */
		if (partial == chain)
			mark_inode_dirty(inode);
		else
			mark_buffer_dirty_inode(partial->bh, inode);
		free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		free_branches(inode, partial->p + 1, block_end(partial->bh),
				(chain+n-1) - partial);
		mark_buffer_dirty_inode(partial->bh, inode);
		brelse (partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	while (first_whole < DEPTH-1) {
		nr = idata[DIRECT+first_whole];
		if (nr) {
			idata[DIRECT+first_whole] = 0;
			mark_inode_dirty(inode);
			free_branches(inode, &nr, &nr+1, first_whole+1);
		}
		first_whole++;
	}
	inode->i_mtime = inode->i_ctime = current_time(inode);
	mark_inode_dirty(inode);
}
355
356 static inline unsigned nblocks(loff_t size, struct super_block *sb)
357 {
358         int k = sb->s_blocksize_bits - 10;
359         unsigned blocks, res, direct = DIRECT, i = DEPTH;
360         blocks = (size + sb->s_blocksize - 1) >> (BLOCK_SIZE_BITS + k);
361         res = blocks;
362         while (--i && blocks > direct) {
363                 blocks -= direct;
364                 blocks += sb->s_blocksize/sizeof(block_t) - 1;
365                 blocks /= sb->s_blocksize/sizeof(block_t);
366                 res += blocks;
367                 direct = 1;
368         }
369         return res;
370 }