GNU Linux-libre 5.10.153-gnu1
fs/nilfs2/inode.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * inode.c - NILFS inode operations.
4  *
5  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
6  *
7  * Written by Ryusuke Konishi.
8  *
9  */
10
11 #include <linux/buffer_head.h>
12 #include <linux/gfp.h>
13 #include <linux/mpage.h>
14 #include <linux/pagemap.h>
15 #include <linux/writeback.h>
16 #include <linux/uio.h>
17 #include <linux/fiemap.h>
18 #include "nilfs.h"
19 #include "btnode.h"
20 #include "segment.h"
21 #include "page.h"
22 #include "mdt.h"
23 #include "cpfile.h"
24 #include "ifile.h"
25
26 /**
27  * struct nilfs_iget_args - arguments used during comparison between inodes
28  * @ino: inode number
29  * @cno: checkpoint number
30  * @root: pointer to NILFS root object (mounted checkpoint)
31  * @for_gc: inode for GC flag
32  * @for_btnc: inode for B-tree node cache flag
33  * @for_shadow: inode for shadowed page cache flag
34  */
35 struct nilfs_iget_args {
36         u64 ino;
37         __u64 cno;
38         struct nilfs_root *root;
39         bool for_gc;
40         bool for_btnc;
41         bool for_shadow;
42 };
43
44 static int nilfs_iget_test(struct inode *inode, void *opaque);
45
46 void nilfs_inode_add_blocks(struct inode *inode, int n)
47 {
48         struct nilfs_root *root = NILFS_I(inode)->i_root;
49
50         inode_add_bytes(inode, i_blocksize(inode) * n);
51         if (root)
52                 atomic64_add(n, &root->blocks_count);
53 }
54
55 void nilfs_inode_sub_blocks(struct inode *inode, int n)
56 {
57         struct nilfs_root *root = NILFS_I(inode)->i_root;
58
59         inode_sub_bytes(inode, i_blocksize(inode) * n);
60         if (root)
61                 atomic64_sub(n, &root->blocks_count);
62 }
63
64 /**
65  * nilfs_get_block() - get a file block on the filesystem (callback function)
66  * @inode: inode struct of the target file
67  * @blkoff: file block number
68  * @bh_result: buffer head to be mapped on
69  * @create: indicate whether to allocate the block if it has not been
70  *      allocated yet
71  *
72  * This function does not issue an actual read request for the specified
73  * data block; that is done by the VFS.
74  */
75 int nilfs_get_block(struct inode *inode, sector_t blkoff,
76                     struct buffer_head *bh_result, int create)
77 {
78         struct nilfs_inode_info *ii = NILFS_I(inode);
79         struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
80         __u64 blknum = 0;
81         int err = 0, ret;
82         unsigned int maxblocks = bh_result->b_size >> inode->i_blkbits;
83
84         down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
85         ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
86         up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
87         if (ret >= 0) { /* found */
88                 map_bh(bh_result, inode->i_sb, blknum);
89                 if (ret > 0)
90                         bh_result->b_size = (ret << inode->i_blkbits);
91                 goto out;
92         }
93         /* data block was not found */
94         if (ret == -ENOENT && create) {
95                 struct nilfs_transaction_info ti;
96
97                 bh_result->b_blocknr = 0;
98                 err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
99                 if (unlikely(err))
100                         goto out;
101                 err = nilfs_bmap_insert(ii->i_bmap, blkoff,
102                                         (unsigned long)bh_result);
103                 if (unlikely(err != 0)) {
104                         if (err == -EEXIST) {
105                                 /*
106                                  * The get_block() function could be called
107                                  * from multiple callers for an inode.
108                                  * However, the page having this block must
109                                  * be locked in this case.
110                                  */
111                                 nilfs_warn(inode->i_sb,
112                                            "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
113                                            __func__, inode->i_ino,
114                                            (unsigned long long)blkoff);
115                                 err = 0;
116                         }
117                         nilfs_transaction_abort(inode->i_sb);
118                         goto out;
119                 }
120                 nilfs_mark_inode_dirty_sync(inode);
121                 nilfs_transaction_commit(inode->i_sb); /* never fails */
122                 /* Error handling should be detailed */
123                 set_buffer_new(bh_result);
124                 set_buffer_delay(bh_result);
125                 map_bh(bh_result, inode->i_sb, 0);
126                 /* Disk block number must be changed to a proper value */
127
128         } else if (ret == -ENOENT) {
129                 /*
130                  * A missing block is not an error here (e.g. a hole);
131                  * return without setting the mapped state flag.
132                  */
133                 ;
134         } else {
135                 err = ret;
136         }
137
138  out:
139         return err;
140 }
141
142 /**
143  * nilfs_readpage() - implement the ->readpage() method of the nilfs_aops
144  * address_space_operations
145  * @file: file struct of the file to be read
146  * @page: the page to be read
147  */
148 static int nilfs_readpage(struct file *file, struct page *page)
149 {
150         return mpage_readpage(page, nilfs_get_block);
151 }
152
153 static void nilfs_readahead(struct readahead_control *rac)
154 {
155         mpage_readahead(rac, nilfs_get_block);
156 }
157
158 static int nilfs_writepages(struct address_space *mapping,
159                             struct writeback_control *wbc)
160 {
161         struct inode *inode = mapping->host;
162         int err = 0;
163
164         if (sb_rdonly(inode->i_sb)) {
165                 nilfs_clear_dirty_pages(mapping, false);
166                 return -EROFS;
167         }
168
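        /*
         * Only data-integrity sync (WB_SYNC_ALL) triggers writeback here;
         * otherwise dirty pages are left to the log writer, which writes
         * them out as part of segment construction.
         */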
169         if (wbc->sync_mode == WB_SYNC_ALL)
170                 err = nilfs_construct_dsync_segment(inode->i_sb, inode,
171                                                     wbc->range_start,
172                                                     wbc->range_end);
173         return err;
174 }
175
176 static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
177 {
178         struct inode *inode = page->mapping->host;
179         int err;
180
181         if (sb_rdonly(inode->i_sb)) {
182                 /*
183                  * This means the filesystem was remounted read-only
184                  * because of an error or metadata corruption, but dirty
185                  * pages are still being flushed in the background.
186                  * Simply discard this dirty page here.
187                  */
188                 nilfs_clear_dirty_page(page, false);
189                 unlock_page(page);
190                 return -EROFS;
191         }
192
193         redirty_page_for_writepage(wbc, page);
194         unlock_page(page);
195
196         if (wbc->sync_mode == WB_SYNC_ALL) {
197                 err = nilfs_construct_segment(inode->i_sb);
198                 if (unlikely(err))
199                         return err;
200         } else if (wbc->for_reclaim)
201                 nilfs_flush_segment(inode->i_sb, inode->i_ino);
202
203         return 0;
204 }
205
206 static int nilfs_set_page_dirty(struct page *page)
207 {
208         struct inode *inode = page->mapping->host;
209         int ret = __set_page_dirty_nobuffers(page);
210
211         if (page_has_buffers(page)) {
212                 unsigned int nr_dirty = 0;
213                 struct buffer_head *bh, *head;
214
215                 /*
216                  * The page is locked by the callers, and no other thread
217                  * marks its buffers dirty concurrently, since buffers are
218                  * only dirtied through routines in fs/buffer.c whose
219                  * mark_buffer_dirty() call sites are protected by the
220                  * page lock.
221                  */
222                 bh = head = page_buffers(page);
223                 do {
224                         /* Do not mark hole blocks dirty */
225                         if (buffer_dirty(bh) || !buffer_mapped(bh))
226                                 continue;
227
228                         set_buffer_dirty(bh);
229                         nr_dirty++;
230                 } while (bh = bh->b_this_page, bh != head);
231
232                 if (nr_dirty)
233                         nilfs_set_file_dirty(inode, nr_dirty);
234         } else if (ret) {
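                /*
                 * The page has no buffers; account a whole page worth of
                 * blocks as newly dirtied.
                 */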
235                 unsigned int nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);
236
237                 nilfs_set_file_dirty(inode, nr_dirty);
238         }
239         return ret;
240 }
241
242 void nilfs_write_failed(struct address_space *mapping, loff_t to)
243 {
244         struct inode *inode = mapping->host;
245
246         if (to > inode->i_size) {
247                 truncate_pagecache(inode, inode->i_size);
248                 nilfs_truncate(inode);
249         }
250 }
251
252 static int nilfs_write_begin(struct file *file, struct address_space *mapping,
253                              loff_t pos, unsigned len, unsigned flags,
254                              struct page **pagep, void **fsdata)
255
256 {
257         struct inode *inode = mapping->host;
258         int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);
259
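        /*
         * The transaction opened above is committed in nilfs_write_end(),
         * or aborted below if block_write_begin() fails.
         */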
260         if (unlikely(err))
261                 return err;
262
263         err = block_write_begin(mapping, pos, len, flags, pagep,
264                                 nilfs_get_block);
265         if (unlikely(err)) {
266                 nilfs_write_failed(mapping, pos + len);
267                 nilfs_transaction_abort(inode->i_sb);
268         }
269         return err;
270 }
271
272 static int nilfs_write_end(struct file *file, struct address_space *mapping,
273                            loff_t pos, unsigned len, unsigned copied,
274                            struct page *page, void *fsdata)
275 {
276         struct inode *inode = mapping->host;
277         unsigned int start = pos & (PAGE_SIZE - 1);
278         unsigned int nr_dirty;
279         int err;
280
281         nr_dirty = nilfs_page_count_clean_buffers(page, start,
282                                                   start + copied);
283         copied = generic_write_end(file, mapping, pos, len, copied, page,
284                                    fsdata);
285         nilfs_set_file_dirty(inode, nr_dirty);
286         err = nilfs_transaction_commit(inode->i_sb);
287         return err ? : copied;
288 }
289
290 static ssize_t
291 nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
292 {
293         struct inode *inode = file_inode(iocb->ki_filp);
294
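        /*
         * nilfs2 does not support direct writes; returning 0 here makes
         * the caller fall back to buffered write.
         */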
295         if (iov_iter_rw(iter) == WRITE)
296                 return 0;
297
298         /* Needs synchronization with the cleaner */
299         return blockdev_direct_IO(iocb, inode, iter, nilfs_get_block);
300 }
301
302 const struct address_space_operations nilfs_aops = {
303         .writepage              = nilfs_writepage,
304         .readpage               = nilfs_readpage,
305         .writepages             = nilfs_writepages,
306         .set_page_dirty         = nilfs_set_page_dirty,
307         .readahead              = nilfs_readahead,
308         .write_begin            = nilfs_write_begin,
309         .write_end              = nilfs_write_end,
310         /* .releasepage         = nilfs_releasepage, */
311         .invalidatepage         = block_invalidatepage,
312         .direct_IO              = nilfs_direct_IO,
313         .is_partially_uptodate  = block_is_partially_uptodate,
314 };
315
316 static int nilfs_insert_inode_locked(struct inode *inode,
317                                      struct nilfs_root *root,
318                                      unsigned long ino)
319 {
320         struct nilfs_iget_args args = {
321                 .ino = ino, .root = root, .cno = 0, .for_gc = false,
322                 .for_btnc = false, .for_shadow = false
323         };
324
325         return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
326 }
327
328 struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
329 {
330         struct super_block *sb = dir->i_sb;
331         struct the_nilfs *nilfs = sb->s_fs_info;
332         struct inode *inode;
333         struct nilfs_inode_info *ii;
334         struct nilfs_root *root;
335         struct buffer_head *bh;
336         int err = -ENOMEM;
337         ino_t ino;
338
339         inode = new_inode(sb);
340         if (unlikely(!inode))
341                 goto failed;
342
343         mapping_set_gfp_mask(inode->i_mapping,
344                            mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
345
346         root = NILFS_I(dir)->i_root;
347         ii = NILFS_I(inode);
348         ii->i_state = BIT(NILFS_I_NEW);
349         ii->i_root = root;
350
351         err = nilfs_ifile_create_inode(root->ifile, &ino, &bh);
352         if (unlikely(err))
353                 goto failed_ifile_create_inode;
354         /* reference count of i_bh inherits from nilfs_mdt_read_block() */
355
356         if (unlikely(ino < NILFS_USER_INO)) {
357                 nilfs_warn(sb,
358                            "inode bitmap is inconsistent for reserved inodes");
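                /*
                 * Reserved inode numbers were wrongly marked free in the
                 * inode bitmap.  Keep allocating (and releasing the buffer
                 * heads) until a non-reserved inode number is obtained;
                 * this leaves the reserved entries marked as in use again.
                 */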
359                 do {
360                         brelse(bh);
361                         err = nilfs_ifile_create_inode(root->ifile, &ino, &bh);
362                         if (unlikely(err))
363                                 goto failed_ifile_create_inode;
364                 } while (ino < NILFS_USER_INO);
365
366                 nilfs_info(sb, "repaired inode bitmap for reserved inodes");
367         }
368         ii->i_bh = bh;
369
370         atomic64_inc(&root->inodes_count);
371         inode_init_owner(inode, dir, mode);
372         inode->i_ino = ino;
373         inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
374
375         if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
376                 err = nilfs_bmap_read(ii->i_bmap, NULL);
377                 if (err < 0)
378                         goto failed_after_creation;
379
380                 set_bit(NILFS_I_BMAP, &ii->i_state);
381                 /* No lock is needed; iget() ensures it. */
382         }
383
384         ii->i_flags = nilfs_mask_flags(
385                 mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);
386
387         /* ii->i_file_acl = 0; */
388         /* ii->i_dir_acl = 0; */
389         ii->i_dir_start_lookup = 0;
390         nilfs_set_inode_flags(inode);
391         spin_lock(&nilfs->ns_next_gen_lock);
392         inode->i_generation = nilfs->ns_next_generation++;
393         spin_unlock(&nilfs->ns_next_gen_lock);
394         if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
395                 err = -EIO;
396                 goto failed_after_creation;
397         }
398
399         err = nilfs_init_acl(inode, dir);
400         if (unlikely(err))
401                 /*
402                  * This never occurs.  When nilfs_init_acl() is supported,
403                  * proper cancellation of the above jobs should be considered.
404                  */
405                 goto failed_after_creation;
406
407         return inode;
408
409  failed_after_creation:
410         clear_nlink(inode);
411         if (inode->i_state & I_NEW)
412                 unlock_new_inode(inode);
413         iput(inode);  /*
414                        * raw_inode will be deleted through
415                        * nilfs_evict_inode().
416                        */
417         goto failed;
418
419  failed_ifile_create_inode:
420         make_bad_inode(inode);
421         iput(inode);
422  failed:
423         return ERR_PTR(err);
424 }
425
426 void nilfs_set_inode_flags(struct inode *inode)
427 {
428         unsigned int flags = NILFS_I(inode)->i_flags;
429         unsigned int new_fl = 0;
430
431         if (flags & FS_SYNC_FL)
432                 new_fl |= S_SYNC;
433         if (flags & FS_APPEND_FL)
434                 new_fl |= S_APPEND;
435         if (flags & FS_IMMUTABLE_FL)
436                 new_fl |= S_IMMUTABLE;
437         if (flags & FS_NOATIME_FL)
438                 new_fl |= S_NOATIME;
439         if (flags & FS_DIRSYNC_FL)
440                 new_fl |= S_DIRSYNC;
441         inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
442                         S_NOATIME | S_DIRSYNC);
443 }
444
445 int nilfs_read_inode_common(struct inode *inode,
446                             struct nilfs_inode *raw_inode)
447 {
448         struct nilfs_inode_info *ii = NILFS_I(inode);
449         int err;
450
451         inode->i_mode = le16_to_cpu(raw_inode->i_mode);
452         i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
453         i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
454         set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
455         inode->i_size = le64_to_cpu(raw_inode->i_size);
456         inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
457         inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
458         inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
459         inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
460         inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
461         inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
462         if (nilfs_is_metadata_file_inode(inode) && !S_ISREG(inode->i_mode))
463                 return -EIO; /* this inode is for metadata and corrupted */
464         if (inode->i_nlink == 0)
465                 return -ESTALE; /* this inode is deleted */
466
467         inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
468         ii->i_flags = le32_to_cpu(raw_inode->i_flags);
469 #if 0
470         ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
471         ii->i_dir_acl = S_ISREG(inode->i_mode) ?
472                 0 : le32_to_cpu(raw_inode->i_dir_acl);
473 #endif
474         ii->i_dir_start_lookup = 0;
475         inode->i_generation = le32_to_cpu(raw_inode->i_generation);
476
477         if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
478             S_ISLNK(inode->i_mode)) {
479                 err = nilfs_bmap_read(ii->i_bmap, raw_inode);
480                 if (err < 0)
481                         return err;
482                 set_bit(NILFS_I_BMAP, &ii->i_state);
483                 /* No lock is needed; iget() ensures it. */
484         }
485         return 0;
486 }
487
488 static int __nilfs_read_inode(struct super_block *sb,
489                               struct nilfs_root *root, unsigned long ino,
490                               struct inode *inode)
491 {
492         struct the_nilfs *nilfs = sb->s_fs_info;
493         struct buffer_head *bh;
494         struct nilfs_inode *raw_inode;
495         int err;
496
497         down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
498         err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
499         if (unlikely(err))
500                 goto bad_inode;
501
502         raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);
503
504         err = nilfs_read_inode_common(inode, raw_inode);
505         if (err)
506                 goto failed_unmap;
507
508         if (S_ISREG(inode->i_mode)) {
509                 inode->i_op = &nilfs_file_inode_operations;
510                 inode->i_fop = &nilfs_file_operations;
511                 inode->i_mapping->a_ops = &nilfs_aops;
512         } else if (S_ISDIR(inode->i_mode)) {
513                 inode->i_op = &nilfs_dir_inode_operations;
514                 inode->i_fop = &nilfs_dir_operations;
515                 inode->i_mapping->a_ops = &nilfs_aops;
516         } else if (S_ISLNK(inode->i_mode)) {
517                 inode->i_op = &nilfs_symlink_inode_operations;
518                 inode_nohighmem(inode);
519                 inode->i_mapping->a_ops = &nilfs_aops;
520         } else {
521                 inode->i_op = &nilfs_special_inode_operations;
522                 init_special_inode(
523                         inode, inode->i_mode,
524                         huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
525         }
526         nilfs_ifile_unmap_inode(root->ifile, ino, bh);
527         brelse(bh);
528         up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
529         nilfs_set_inode_flags(inode);
530         mapping_set_gfp_mask(inode->i_mapping,
531                            mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
532         return 0;
533
534  failed_unmap:
535         nilfs_ifile_unmap_inode(root->ifile, ino, bh);
536         brelse(bh);
537
538  bad_inode:
539         up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
540         return err;
541 }
542
543 static int nilfs_iget_test(struct inode *inode, void *opaque)
544 {
545         struct nilfs_iget_args *args = opaque;
546         struct nilfs_inode_info *ii;
547
548         if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
549                 return 0;
550
551         ii = NILFS_I(inode);
552         if (test_bit(NILFS_I_BTNC, &ii->i_state)) {
553                 if (!args->for_btnc)
554                         return 0;
555         } else if (args->for_btnc) {
556                 return 0;
557         }
558         if (test_bit(NILFS_I_SHADOW, &ii->i_state)) {
559                 if (!args->for_shadow)
560                         return 0;
561         } else if (args->for_shadow) {
562                 return 0;
563         }
564
565         if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
566                 return !args->for_gc;
567
568         return args->for_gc && args->cno == ii->i_cno;
569 }
570
571 static int nilfs_iget_set(struct inode *inode, void *opaque)
572 {
573         struct nilfs_iget_args *args = opaque;
574
575         inode->i_ino = args->ino;
576         NILFS_I(inode)->i_cno = args->cno;
577         NILFS_I(inode)->i_root = args->root;
578         if (args->root && args->ino == NILFS_ROOT_INO)
579                 nilfs_get_root(args->root);
580
581         if (args->for_gc)
582                 NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
583         if (args->for_btnc)
584                 NILFS_I(inode)->i_state |= BIT(NILFS_I_BTNC);
585         if (args->for_shadow)
586                 NILFS_I(inode)->i_state |= BIT(NILFS_I_SHADOW);
587         return 0;
588 }
589
590 struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
591                             unsigned long ino)
592 {
593         struct nilfs_iget_args args = {
594                 .ino = ino, .root = root, .cno = 0, .for_gc = false,
595                 .for_btnc = false, .for_shadow = false
596         };
597
598         return ilookup5(sb, ino, nilfs_iget_test, &args);
599 }
600
601 struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
602                                 unsigned long ino)
603 {
604         struct nilfs_iget_args args = {
605                 .ino = ino, .root = root, .cno = 0, .for_gc = false,
606                 .for_btnc = false, .for_shadow = false
607         };
608
609         return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
610 }
611
612 struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
613                          unsigned long ino)
614 {
615         struct inode *inode;
616         int err;
617
618         inode = nilfs_iget_locked(sb, root, ino);
619         if (unlikely(!inode))
620                 return ERR_PTR(-ENOMEM);
621         if (!(inode->i_state & I_NEW))
622                 return inode;
623
624         err = __nilfs_read_inode(sb, root, ino, inode);
625         if (unlikely(err)) {
626                 iget_failed(inode);
627                 return ERR_PTR(err);
628         }
629         unlock_new_inode(inode);
630         return inode;
631 }
632
633 struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
634                                 __u64 cno)
635 {
636         struct nilfs_iget_args args = {
637                 .ino = ino, .root = NULL, .cno = cno, .for_gc = true,
638                 .for_btnc = false, .for_shadow = false
639         };
640         struct inode *inode;
641         int err;
642
643         inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
644         if (unlikely(!inode))
645                 return ERR_PTR(-ENOMEM);
646         if (!(inode->i_state & I_NEW))
647                 return inode;
648
649         err = nilfs_init_gcinode(inode);
650         if (unlikely(err)) {
651                 iget_failed(inode);
652                 return ERR_PTR(err);
653         }
654         unlock_new_inode(inode);
655         return inode;
656 }
657
658 /**
659  * nilfs_attach_btree_node_cache - attach a B-tree node cache to the inode
660  * @inode: inode object
661  *
662  * nilfs_attach_btree_node_cache() attaches a B-tree node cache to @inode,
663  * or does nothing if the inode already has it.  This function allocates
664  * an additional inode to maintain page cache of B-tree nodes one-on-one.
665  *
666  * Return Value: On success, 0 is returned. On errors, one of the following
667  * negative error codes is returned.
668  *
669  * %-ENOMEM - Insufficient memory available.
670  */
671 int nilfs_attach_btree_node_cache(struct inode *inode)
672 {
673         struct nilfs_inode_info *ii = NILFS_I(inode);
674         struct inode *btnc_inode;
675         struct nilfs_iget_args args;
676
677         if (ii->i_assoc_inode)
678                 return 0;
679
680         args.ino = inode->i_ino;
681         args.root = ii->i_root;
682         args.cno = ii->i_cno;
683         args.for_gc = test_bit(NILFS_I_GCINODE, &ii->i_state) != 0;
684         args.for_btnc = true;
685         args.for_shadow = test_bit(NILFS_I_SHADOW, &ii->i_state) != 0;
686
687         btnc_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
688                                   nilfs_iget_set, &args);
689         if (unlikely(!btnc_inode))
690                 return -ENOMEM;
691         if (btnc_inode->i_state & I_NEW) {
692                 nilfs_init_btnc_inode(btnc_inode);
693                 unlock_new_inode(btnc_inode);
694         }
695         NILFS_I(btnc_inode)->i_assoc_inode = inode;
696         NILFS_I(btnc_inode)->i_bmap = ii->i_bmap;
697         ii->i_assoc_inode = btnc_inode;
698
699         return 0;
700 }
701
702 /**
703  * nilfs_detach_btree_node_cache - detach the B-tree node cache from the inode
704  * @inode: inode object
705  *
706  * nilfs_detach_btree_node_cache() detaches the B-tree node cache and its
707  * holder inode bound to @inode, or does nothing if @inode doesn't have it.
708  */
709 void nilfs_detach_btree_node_cache(struct inode *inode)
710 {
711         struct nilfs_inode_info *ii = NILFS_I(inode);
712         struct inode *btnc_inode = ii->i_assoc_inode;
713
714         if (btnc_inode) {
715                 NILFS_I(btnc_inode)->i_assoc_inode = NULL;
716                 ii->i_assoc_inode = NULL;
717                 iput(btnc_inode);
718         }
719 }
720
721 /**
722  * nilfs_iget_for_shadow - obtain inode for shadow mapping
723  * @inode: inode object that uses shadow mapping
724  *
725  * nilfs_iget_for_shadow() allocates a pair of inodes that holds page
726  * caches for shadow mapping.  The page cache for data pages is set up
727  * in one inode and the one for b-tree node pages is set up in the
728  * other inode, which is attached to the former inode.
729  *
730  * Return Value: On success, a pointer to the inode for data pages is
731  * returned. On errors, one of the following negative error codes is
732  * returned as an error pointer.
733  *
734  * %-ENOMEM - Insufficient memory available.
735  */
736 struct inode *nilfs_iget_for_shadow(struct inode *inode)
737 {
738         struct nilfs_iget_args args = {
739                 .ino = inode->i_ino, .root = NULL, .cno = 0, .for_gc = false,
740                 .for_btnc = false, .for_shadow = true
741         };
742         struct inode *s_inode;
743         int err;
744
745         s_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
746                                nilfs_iget_set, &args);
747         if (unlikely(!s_inode))
748                 return ERR_PTR(-ENOMEM);
749         if (!(s_inode->i_state & I_NEW))
750                 return inode;
751
752         NILFS_I(s_inode)->i_flags = 0;
753         memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap));
754         mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS);
755
756         err = nilfs_attach_btree_node_cache(s_inode);
757         if (unlikely(err)) {
758                 iget_failed(s_inode);
759                 return ERR_PTR(err);
760         }
761         unlock_new_inode(s_inode);
762         return s_inode;
763 }
764
765 void nilfs_write_inode_common(struct inode *inode,
766                               struct nilfs_inode *raw_inode, int has_bmap)
767 {
768         struct nilfs_inode_info *ii = NILFS_I(inode);
769
770         raw_inode->i_mode = cpu_to_le16(inode->i_mode);
771         raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
772         raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
773         raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
774         raw_inode->i_size = cpu_to_le64(inode->i_size);
775         raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
776         raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
777         raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
778         raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
779         raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);
780
781         raw_inode->i_flags = cpu_to_le32(ii->i_flags);
782         raw_inode->i_generation = cpu_to_le32(inode->i_generation);
783
784         if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
785                 struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
786
787                 /* zero-fill unused portion in the case of super root block */
788                 raw_inode->i_xattr = 0;
789                 raw_inode->i_pad = 0;
790                 memset((void *)raw_inode + sizeof(*raw_inode), 0,
791                        nilfs->ns_inode_size - sizeof(*raw_inode));
792         }
793
794         if (has_bmap)
795                 nilfs_bmap_write(ii->i_bmap, raw_inode);
796         else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
797                 raw_inode->i_device_code =
798                         cpu_to_le64(huge_encode_dev(inode->i_rdev));
799         /*
800          * When extending inode, nilfs->ns_inode_size should be checked
801          * for substitutions of appended fields.
802          */
803 }
804
805 void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
806 {
807         ino_t ino = inode->i_ino;
808         struct nilfs_inode_info *ii = NILFS_I(inode);
809         struct inode *ifile = ii->i_root->ifile;
810         struct nilfs_inode *raw_inode;
811
812         raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);
813
814         if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
815                 memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
816         if (flags & I_DIRTY_DATASYNC)
817                 set_bit(NILFS_I_INODE_SYNC, &ii->i_state);
818
819         nilfs_write_inode_common(inode, raw_inode, 0);
820                 /*
821                  * XXX: call with has_bmap = 0 is a workaround to avoid
822                  * deadlock of bmap.  This delays update of i_bmap to just
823                  * before writing.
824                  */
825
826         nilfs_ifile_unmap_inode(ifile, ino, ibh);
827 }
828
829 #define NILFS_MAX_TRUNCATE_BLOCKS       16384  /* 64MB for 4KB block */
830
831 static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
832                                 unsigned long from)
833 {
834         __u64 b;
835         int ret;
836
837         if (!test_bit(NILFS_I_BMAP, &ii->i_state))
838                 return;
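        /*
         * Truncate the bmap from its tail toward "from" in chunks of at
         * most NILFS_MAX_TRUNCATE_BLOCKS blocks, relaxing segment
         * construction pressure between steps and retrying once on -ENOMEM.
         */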
839 repeat:
840         ret = nilfs_bmap_last_key(ii->i_bmap, &b);
841         if (ret == -ENOENT)
842                 return;
843         else if (ret < 0)
844                 goto failed;
845
846         if (b < from)
847                 return;
848
849         b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
850         ret = nilfs_bmap_truncate(ii->i_bmap, b);
851         nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
852         if (!ret || (ret == -ENOMEM &&
853                      nilfs_bmap_truncate(ii->i_bmap, b) == 0))
854                 goto repeat;
855
856 failed:
857         nilfs_warn(ii->vfs_inode.i_sb, "error %d truncating bmap (ino=%lu)",
858                    ret, ii->vfs_inode.i_ino);
859 }
860
861 void nilfs_truncate(struct inode *inode)
862 {
863         unsigned long blkoff;
864         unsigned int blocksize;
865         struct nilfs_transaction_info ti;
866         struct super_block *sb = inode->i_sb;
867         struct nilfs_inode_info *ii = NILFS_I(inode);
868
869         if (!test_bit(NILFS_I_BMAP, &ii->i_state))
870                 return;
871         if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
872                 return;
873
874         blocksize = sb->s_blocksize;
875         blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
876         nilfs_transaction_begin(sb, &ti, 0); /* never fails */
877
878         block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);
879
880         nilfs_truncate_bmap(ii, blkoff);
881
882         inode->i_mtime = inode->i_ctime = current_time(inode);
883         if (IS_SYNC(inode))
884                 nilfs_set_transaction_flag(NILFS_TI_SYNC);
885
886         nilfs_mark_inode_dirty(inode);
887         nilfs_set_file_dirty(inode, 0);
888         nilfs_transaction_commit(sb);
889         /*
890          * May construct a logical segment and may fail in sync mode.
891          * But truncate has no return value.
892          */
893 }
894
895 static void nilfs_clear_inode(struct inode *inode)
896 {
897         struct nilfs_inode_info *ii = NILFS_I(inode);
898
899         /*
900          * Free the resources allocated in nilfs_read_inode() here.
901          */
902         BUG_ON(!list_empty(&ii->i_dirty));
903         brelse(ii->i_bh);
904         ii->i_bh = NULL;
905
906         if (nilfs_is_metadata_file_inode(inode))
907                 nilfs_mdt_clear(inode);
908
909         if (test_bit(NILFS_I_BMAP, &ii->i_state))
910                 nilfs_bmap_clear(ii->i_bmap);
911
912         if (!test_bit(NILFS_I_BTNC, &ii->i_state))
913                 nilfs_detach_btree_node_cache(inode);
914
915         if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
916                 nilfs_put_root(ii->i_root);
917 }
918
919 void nilfs_evict_inode(struct inode *inode)
920 {
921         struct nilfs_transaction_info ti;
922         struct super_block *sb = inode->i_sb;
923         struct nilfs_inode_info *ii = NILFS_I(inode);
924         int ret;
925
926         if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
927                 truncate_inode_pages_final(&inode->i_data);
928                 clear_inode(inode);
929                 nilfs_clear_inode(inode);
930                 return;
931         }
932         nilfs_transaction_begin(sb, &ti, 0); /* never fails */
933
934         truncate_inode_pages_final(&inode->i_data);
935
936         /* TODO: some of the following operations may fail.  */
937         nilfs_truncate_bmap(ii, 0);
938         nilfs_mark_inode_dirty(inode);
939         clear_inode(inode);
940
941         ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
942         if (!ret)
943                 atomic64_dec(&ii->i_root->inodes_count);
944
945         nilfs_clear_inode(inode);
946
947         if (IS_SYNC(inode))
948                 nilfs_set_transaction_flag(NILFS_TI_SYNC);
949         nilfs_transaction_commit(sb);
950         /*
951          * May construct a logical segment and may fail in sync mode.
952          * But delete_inode has no return value.
953          */
954 }
955
956 int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
957 {
958         struct nilfs_transaction_info ti;
959         struct inode *inode = d_inode(dentry);
960         struct super_block *sb = inode->i_sb;
961         int err;
962
963         err = setattr_prepare(dentry, iattr);
964         if (err)
965                 return err;
966
967         err = nilfs_transaction_begin(sb, &ti, 0);
968         if (unlikely(err))
969                 return err;
970
971         if ((iattr->ia_valid & ATTR_SIZE) &&
972             iattr->ia_size != i_size_read(inode)) {
973                 inode_dio_wait(inode);
974                 truncate_setsize(inode, iattr->ia_size);
975                 nilfs_truncate(inode);
976         }
977
978         setattr_copy(inode, iattr);
979         mark_inode_dirty(inode);
980
981         if (iattr->ia_valid & ATTR_MODE) {
982                 err = nilfs_acl_chmod(inode);
983                 if (unlikely(err))
984                         goto out_err;
985         }
986
987         return nilfs_transaction_commit(sb);
988
989 out_err:
990         nilfs_transaction_abort(sb);
991         return err;
992 }
993
994 int nilfs_permission(struct inode *inode, int mask)
995 {
996         struct nilfs_root *root = NILFS_I(inode)->i_root;
997
998         if ((mask & MAY_WRITE) && root &&
999             root->cno != NILFS_CPTREE_CURRENT_CNO)
1000                 return -EROFS; /* snapshot is not writable */
1001
1002         return generic_permission(inode, mask);
1003 }
1004
1005 int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
1006 {
1007         struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
1008         struct nilfs_inode_info *ii = NILFS_I(inode);
1009         int err;
1010
1011         spin_lock(&nilfs->ns_inode_lock);
1012         if (ii->i_bh == NULL) {
1013                 spin_unlock(&nilfs->ns_inode_lock);
1014                 err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
1015                                                   inode->i_ino, pbh);
1016                 if (unlikely(err))
1017                         return err;
1018                 spin_lock(&nilfs->ns_inode_lock);
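                /*
                 * Recheck under the lock: another task may have installed
                 * ii->i_bh while the lock was dropped.
                 */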
1019                 if (ii->i_bh == NULL)
1020                         ii->i_bh = *pbh;
1021                 else {
1022                         brelse(*pbh);
1023                         *pbh = ii->i_bh;
1024                 }
1025         } else
1026                 *pbh = ii->i_bh;
1027
1028         get_bh(*pbh);
1029         spin_unlock(&nilfs->ns_inode_lock);
1030         return 0;
1031 }
1032
1033 int nilfs_inode_dirty(struct inode *inode)
1034 {
1035         struct nilfs_inode_info *ii = NILFS_I(inode);
1036         struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
1037         int ret = 0;
1038
1039         if (!list_empty(&ii->i_dirty)) {
1040                 spin_lock(&nilfs->ns_inode_lock);
1041                 ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
1042                         test_bit(NILFS_I_BUSY, &ii->i_state);
1043                 spin_unlock(&nilfs->ns_inode_lock);
1044         }
1045         return ret;
1046 }
1047
1048 int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
1049 {
1050         struct nilfs_inode_info *ii = NILFS_I(inode);
1051         struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
1052
1053         atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);
1054
1055         if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
1056                 return 0;
1057
1058         spin_lock(&nilfs->ns_inode_lock);
1059         if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
1060             !test_bit(NILFS_I_BUSY, &ii->i_state)) {
1061                 /*
1062                  * Because this routine may race with nilfs_dispose_list(),
1063                  * we have to check NILFS_I_QUEUED here, too.
1064                  */
1065                 if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
1066                         /*
1067                          * This will happen when somebody is freeing
1068                          * this inode.
1069                          */
1070                         nilfs_warn(inode->i_sb,
1071                                    "cannot set file dirty (ino=%lu): the file is being freed",
1072                                    inode->i_ino);
1073                         spin_unlock(&nilfs->ns_inode_lock);
1074                         return -EINVAL; /*
1075                                          * NILFS_I_DIRTY may remain for
1076                                          * freeing inode.
1077                                          */
1078                 }
1079                 list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
1080                 set_bit(NILFS_I_QUEUED, &ii->i_state);
1081         }
1082         spin_unlock(&nilfs->ns_inode_lock);
1083         return 0;
1084 }
1085
1086 int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
1087 {
1088         struct buffer_head *ibh;
1089         int err;
1090
1091         err = nilfs_load_inode_block(inode, &ibh);
1092         if (unlikely(err)) {
1093                 nilfs_warn(inode->i_sb,
1094                            "cannot mark inode dirty (ino=%lu): error %d loading inode block",
1095                            inode->i_ino, err);
1096                 return err;
1097         }
1098         nilfs_update_inode(inode, ibh, flags);
1099         mark_buffer_dirty(ibh);
1100         nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
1101         brelse(ibh);
1102         return 0;
1103 }
1104
1105 /**
1106  * nilfs_dirty_inode - reflect changes on given inode to an inode block.
1107  * @inode: inode of the file to be registered.
1108  *
1109  * nilfs_dirty_inode() loads an inode block containing the specified
1110  * @inode and copies data from a nilfs_inode to a corresponding inode
1111  * entry in the inode block. This operation is excluded from the segment
1112  * construction. This function can be called both as a single operation
1113  * and as a part of indivisible file operations.
1114  */
1115 void nilfs_dirty_inode(struct inode *inode, int flags)
1116 {
1117         struct nilfs_transaction_info ti;
1118         struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
1119
1120         if (is_bad_inode(inode)) {
1121                 nilfs_warn(inode->i_sb,
1122                            "tried to mark bad_inode dirty. ignored.");
1123                 dump_stack();
1124                 return;
1125         }
1126         if (mdi) {
1127                 nilfs_mdt_mark_dirty(inode);
1128                 return;
1129         }
1130         nilfs_transaction_begin(inode->i_sb, &ti, 0);
1131         __nilfs_mark_inode_dirty(inode, flags);
1132         nilfs_transaction_commit(inode->i_sb); /* never fails */
1133 }
1134
1135 int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1136                  __u64 start, __u64 len)
1137 {
1138         struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
1139         __u64 logical = 0, phys = 0, size = 0;
1140         __u32 flags = 0;
1141         loff_t isize;
1142         sector_t blkoff, end_blkoff;
1143         sector_t delalloc_blkoff;
1144         unsigned long delalloc_blklen;
1145         unsigned int blkbits = inode->i_blkbits;
1146         int ret, n;
1147
1148         ret = fiemap_prep(inode, fieinfo, start, &len, 0);
1149         if (ret)
1150                 return ret;
1151
1152         inode_lock(inode);
1153
1154         isize = i_size_read(inode);
1155
1156         blkoff = start >> blkbits;
1157         end_blkoff = (start + len - 1) >> blkbits;
1158
1159         delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
1160                                                         &delalloc_blkoff);
1161
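        /*
         * Walk the requested block range, merging physically contiguous
         * mapped blocks into extents and reporting uncommitted (delayed
         * allocation) ranges as FIEMAP_EXTENT_DELALLOC extents.
         */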
1162         do {
1163                 __u64 blkphy;
1164                 unsigned int maxblocks;
1165
1166                 if (delalloc_blklen && blkoff == delalloc_blkoff) {
1167                         if (size) {
1168                                 /* End of the current extent */
1169                                 ret = fiemap_fill_next_extent(
1170                                         fieinfo, logical, phys, size, flags);
1171                                 if (ret)
1172                                         break;
1173                         }
1174                         if (blkoff > end_blkoff)
1175                                 break;
1176
1177                         flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
1178                         logical = blkoff << blkbits;
1179                         phys = 0;
1180                         size = delalloc_blklen << blkbits;
1181
1182                         blkoff = delalloc_blkoff + delalloc_blklen;
1183                         delalloc_blklen = nilfs_find_uncommitted_extent(
1184                                 inode, blkoff, &delalloc_blkoff);
1185                         continue;
1186                 }
1187
1188                 /*
1189                  * Limit the number of blocks that we look up so as
1190                  * not to get into the next delayed allocation extent.
1191                  */
1192                 maxblocks = INT_MAX;
1193                 if (delalloc_blklen)
1194                         maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
1195                                           maxblocks);
1196                 blkphy = 0;
1197
1198                 down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
1199                 n = nilfs_bmap_lookup_contig(
1200                         NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
1201                 up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
1202
1203                 if (n < 0) {
1204                         int past_eof;
1205
1206                         if (unlikely(n != -ENOENT))
1207                                 break; /* error */
1208
1209                         /* HOLE */
1210                         blkoff++;
1211                         past_eof = ((blkoff << blkbits) >= isize);
1212
1213                         if (size) {
1214                                 /* End of the current extent */
1215
1216                                 if (past_eof)
1217                                         flags |= FIEMAP_EXTENT_LAST;
1218
1219                                 ret = fiemap_fill_next_extent(
1220                                         fieinfo, logical, phys, size, flags);
1221                                 if (ret)
1222                                         break;
1223                                 size = 0;
1224                         }
1225                         if (blkoff > end_blkoff || past_eof)
1226                                 break;
1227                 } else {
1228                         if (size) {
1229                                 if (phys && blkphy << blkbits == phys + size) {
1230                                         /* The current extent goes on */
1231                                         size += n << blkbits;
1232                                 } else {
1233                                         /* Terminate the current extent */
1234                                         ret = fiemap_fill_next_extent(
1235                                                 fieinfo, logical, phys, size,
1236                                                 flags);
1237                                         if (ret || blkoff > end_blkoff)
1238                                                 break;
1239
1240                                         /* Start another extent */
1241                                         flags = FIEMAP_EXTENT_MERGED;
1242                                         logical = blkoff << blkbits;
1243                                         phys = blkphy << blkbits;
1244                                         size = n << blkbits;
1245                                 }
1246                         } else {
1247                                 /* Start a new extent */
1248                                 flags = FIEMAP_EXTENT_MERGED;
1249                                 logical = blkoff << blkbits;
1250                                 phys = blkphy << blkbits;
1251                                 size = n << blkbits;
1252                         }
1253                         blkoff += n;
1254                 }
1255                 cond_resched();
1256         } while (true);
1257
1258         /* If ret is 1 then we just hit the end of the extent array */
1259         if (ret == 1)
1260                 ret = 0;
1261
1262         inode_unlock(inode);
1263         return ret;
1264 }