// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ufs/inode.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 *  from
 *
 *  linux/fs/ext2/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */
#include <linux/uaccess.h>

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/iversion.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"
static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4])
{
	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
	int ptrs = uspi->s_apb;
	int ptrs_bits = uspi->s_apbshift;
	const long direct_blocks = UFS_NDADDR,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;

	UFSD("ptrs=uspi->s_apb = %d, double_blocks=%ld\n", ptrs, double_blocks);
	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = UFS_IND_BLOCK;
		offsets[n++] = i_block;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = UFS_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = UFS_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
	} else {
		ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
	}
	return n;
}
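/*
 * Worked example (editorial; illustrative geometry, not values read from
 * any particular superblock): assume UFS1 with 32-bit pointers and
 * ptrs = s_apb = 2048, so ptrs_bits = 11, with UFS_NDADDR = 12.  Then:
 *   i_block 5    -> offsets = {5},                    depth 1 (direct)
 *   i_block 12   -> offsets = {UFS_IND_BLOCK, 0},     depth 2 (indirect)
 *   i_block 2060 -> offsets = {UFS_DIND_BLOCK, 0, 0}, depth 3
 * A returned depth of 0 means the block lies beyond the triple-indirect
 * range, which the callers treat as an error.
 */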
typedef struct {
	void	*p;
	union {
		__fs32	key32;
		__fs64	key64;
	};
	struct buffer_head *bh;
} Indirect;
static inline int grow_chain32(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs32 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;

	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key32 = *(__fs32 *)(to->p = v);
		for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}
static inline int grow_chain64(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs64 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;

	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key64 = *(__fs64 *)(to->p = v);
		for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}
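/*
 * Note (editorial): both grow_chain helpers sample the next pointer under
 * ufsi->meta_lock (a seqlock) and re-check every link gathered so far; if
 * a concurrent truncate clears a pointer, the sequence count forces a
 * retry, and a 0 return makes ufs_frag_map() below restart from the top
 * of the chain rather than follow stale metadata.
 */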
/*
 * Returns the location of the fragment from
 * the beginning of the filesystem.
 */
static u64 ufs_frag_map(struct inode *inode, unsigned offsets[4], int depth)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	u64 mask = (u64) uspi->s_apbmask >> uspi->s_fpbshift;
	int shift = uspi->s_apbshift - uspi->s_fpbshift;
	Indirect chain[4], *q = chain;
	unsigned *p;
	unsigned flags = UFS_SB(sb)->s_flags;
	u64 res = 0;

	UFSD(": uspi->s_fpbshift = %d, uspi->s_apbmask = %x, mask=%llx\n",
		uspi->s_fpbshift, uspi->s_apbmask,
		(unsigned long long)mask);

	if (depth == 0)
		goto no_block;

again:
	p = offsets;

	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;

	if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q))
		goto changed;
	if (!q->key32)
		goto no_block;
	while (--depth) {
		__fs32 *ptr;
		struct buffer_head *bh;
		unsigned n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase +
				  fs32_to_cpu(sb, q->key32) + (n >> shift));
		if (!bh)
			goto no_block;
		ptr = (__fs32 *)bh->b_data + (n & mask);
		if (!grow_chain32(ufsi, bh, ptr, chain, ++q))
			goto changed;
		if (!q->key32)
			goto no_block;
	}
	res = fs32_to_cpu(sb, q->key32);
	goto found;

ufs2:
	if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q))
		goto changed;
	if (!q->key64)
		goto no_block;

	while (--depth) {
		__fs64 *ptr;
		struct buffer_head *bh;
		unsigned n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase +
				  fs64_to_cpu(sb, q->key64) + (n >> shift));
		if (!bh)
			goto no_block;
		ptr = (__fs64 *)bh->b_data + (n & mask);
		if (!grow_chain64(ufsi, bh, ptr, chain, ++q))
			goto changed;
		if (!q->key64)
			goto no_block;
	}
	res = fs64_to_cpu(sb, q->key64);
found:
	res += uspi->s_sbbase;
no_block:
	while (q > chain) {
		brelse(q->bh);
		q--;
	}
	return res;

changed:
	while (q > chain) {
		brelse(q->bh);
		q--;
	}
	goto again;
}
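/*
 * Note (editorial): the value ufs_frag_map() returns is a fragment number
 * relative to the start of the device (s_sbbase already added); a return
 * of 0 means the requested range is a hole and nothing is mapped.
 */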
/*
 * Unpacking tails: we have a file with a partial final block and
 * we have been asked to extend it.  If the fragment being written
 * is within the same block, we need to extend the tail just to cover
 * that fragment.  Otherwise the tail is extended to a full block.
 *
 * Note that we might need to create a _new_ tail, but that will
 * be handled elsewhere; this is strictly for resizing old
 * ones.
 */
static bool
ufs_extend_tail(struct inode *inode, u64 writes_to,
		  int *err, struct page *locked_page)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned lastfrag = ufsi->i_lastfrag;	/* it's a short file, so unsigned is enough */
	unsigned block = ufs_fragstoblks(lastfrag);
	unsigned new_size;
	void *p;
	u64 tmp;

	if (writes_to < (lastfrag | uspi->s_fpbmask))
		new_size = (writes_to & uspi->s_fpbmask) + 1;
	else
		new_size = uspi->s_fpb;

	p = ufs_get_direct_data_ptr(uspi, ufsi, block);
	tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
				new_size - (lastfrag & uspi->s_fpbmask), err,
				locked_page);
	return tmp != 0;
}
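/*
 * Tail-extension arithmetic, by example (editorial; assumes an
 * illustrative s_fpb = 8 fragments per block, so s_fpbmask = 7).  With
 * i_lastfrag = 13 the tail holds 5 fragments of block 1.  A write to
 * fragment 14 stays inside that block, so new_size = (14 & 7) + 1 = 7
 * and ufs_new_fragments() is asked for 7 - (13 & 7) = 2 more fragments;
 * a write beyond fragment 15 grows the tail to the full block instead.
 */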
/**
 * ufs_inode_getfrag() - allocate new fragment(s)
 * @inode: pointer to inode
 * @index: number of block pointer within the inode's array.
 * @new_fragment: number of new allocated fragment(s)
 * @err: we set it if something goes wrong
 * @new: we set it if we allocate a new block
 * @locked_page: for ufs_new_fragments()
 */
static u64
ufs_inode_getfrag(struct inode *inode, unsigned index,
		  sector_t new_fragment, int *err,
		  int *new, struct page *locked_page)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	u64 tmp, goal, lastfrag;
	unsigned nfrags = uspi->s_fpb;
	void *p;

	/* TODO : to be done for write support
	if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;
	 */

	p = ufs_get_direct_data_ptr(uspi, ufsi, index);
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (tmp)
		goto out;

	lastfrag = ufsi->i_lastfrag;

	/* will that be a new tail? */
	if (new_fragment < UFS_NDIR_FRAGMENT && new_fragment >= lastfrag)
		nfrags = (new_fragment & uspi->s_fpbmask) + 1;

	goal = 0;
	if (index) {
		goal = ufs_data_ptr_to_cpu(sb,
				ufs_get_direct_data_ptr(uspi, ufsi, index - 1));
		if (goal)
			goal += uspi->s_fpb;
	}
	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
				goal, nfrags, err, locked_page);

	if (!tmp) {
		*err = -ENOSPC;
		return 0;
	}

	if (new)
		*new = 1;
	inode->i_ctime = current_time(inode);
	if (IS_SYNC(inode))
		ufs_sync_inode (inode);
	mark_inode_dirty(inode);
out:
	return tmp + uspi->s_sbbase;

	/* This part : To be implemented ....
	   Required only for writing, not required for READ-ONLY.
	ufs2:

	u2_block = ufs_fragstoblks(fragment);
	u2_blockoff = ufs_fragnum(fragment);
	p = ufsi->i_u1.u2_i_data + block;
	goal = 0;

	tmp = fs32_to_cpu(sb, *p);
	lastfrag = ufsi->i_lastfrag;
	 */
}
/**
 * ufs_inode_getblock() - allocate new block
 * @inode: pointer to inode
 * @ind_block: block number of the indirect block
 * @index: number of pointer within the indirect block
 * @new_fragment: number of new allocated fragment
 *  (block will hold this fragment and also uspi->s_fpb-1)
 * @err: see ufs_inode_getfrag()
 * @new: see ufs_inode_getfrag()
 * @locked_page: see ufs_inode_getfrag()
 */
static u64
ufs_inode_getblock(struct inode *inode, u64 ind_block,
		  unsigned index, sector_t new_fragment, int *err,
		  int *new, struct page *locked_page)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	int shift = uspi->s_apbshift - uspi->s_fpbshift;
	u64 tmp = 0, goal;
	struct buffer_head *bh;
	void *p;

	if (!ind_block)
		return 0;

	bh = sb_bread(sb, ind_block + (index >> shift));
	if (unlikely(!bh)) {
		*err = -EIO;
		return 0;
	}

	index &= uspi->s_apbmask >> uspi->s_fpbshift;
	if (uspi->fs_magic == UFS2_MAGIC)
		p = (__fs64 *)bh->b_data + index;
	else
		p = (__fs32 *)bh->b_data + index;

	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (tmp)
		goto out;

	if (index && (uspi->fs_magic == UFS2_MAGIC ?
		      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[index-1])) :
		      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[index-1]))))
		goal = tmp + uspi->s_fpb;
	else
		goal = bh->b_blocknr + uspi->s_fpb;
	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
				uspi->s_fpb, err, locked_page);
	if (!tmp)
		goto out;

	if (new)
		*new = 1;

	mark_buffer_dirty(bh);
	if (IS_SYNC(inode))
		sync_dirty_buffer(bh);
	inode->i_ctime = current_time(inode);
	mark_inode_dirty(inode);
out:
	brelse (bh);
	UFSD("EXIT\n");
	if (tmp)
		tmp += uspi->s_sbbase;
	return tmp;
}
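/*
 * Note (editorial): the allocation goal above is a locality heuristic.
 * If the previous slot in this indirect block is already mapped, aim
 * right behind it (tmp + s_fpb); otherwise aim just past the indirect
 * block itself (bh->b_blocknr + s_fpb), keeping data near its metadata.
 */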
/**
 * ufs_getfrag_block() - `get_block_t' function, interface between UFS and
 * read_folio, writepage and so on
 */
static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	int err = 0, new = 0;
	unsigned offsets[4];
	int depth = ufs_block_to_path(inode, fragment >> uspi->s_fpbshift, offsets);
	u64 phys64 = 0;
	unsigned frag = fragment & uspi->s_fpbmask;

	phys64 = ufs_frag_map(inode, offsets, depth);
	if (!create)
		goto done;

	if (phys64) {
		if (fragment >= UFS_NDIR_FRAGMENT)
			goto done;
		read_seqlock_excl(&UFS_I(inode)->meta_lock);
		if (fragment < UFS_I(inode)->i_lastfrag) {
			read_sequnlock_excl(&UFS_I(inode)->meta_lock);
			goto done;
		}
		read_sequnlock_excl(&UFS_I(inode)->meta_lock);
	}
	/* This code entered only while writing ....? */

	mutex_lock(&UFS_I(inode)->truncate_mutex);

	UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
	if (unlikely(!depth)) {
		ufs_warning(sb, "ufs_get_block", "block > big");
		err = -EIO;
		goto out;
	}

	if (UFS_I(inode)->i_lastfrag < UFS_NDIR_FRAGMENT) {
		unsigned lastfrag = UFS_I(inode)->i_lastfrag;
		unsigned tailfrags = lastfrag & uspi->s_fpbmask;
		if (tailfrags && fragment >= lastfrag) {
			if (!ufs_extend_tail(inode, fragment,
					     &err, bh_result->b_page))
				goto out;
		}
	}

	if (depth == 1) {
		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
					   &err, &new, bh_result->b_page);
	} else {
		int i;
		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
					   &err, NULL, NULL);
		for (i = 1; i < depth - 1; i++)
			phys64 = ufs_inode_getblock(inode, phys64, offsets[i],
						    fragment, &err, NULL, NULL);
		phys64 = ufs_inode_getblock(inode, phys64, offsets[depth - 1],
					    fragment, &err, &new, bh_result->b_page);
	}
out:
	if (phys64) {
		phys64 += frag;
		map_bh(bh_result, sb, phys64);
		if (new)
			set_buffer_new(bh_result);
	}
	mutex_unlock(&UFS_I(inode)->truncate_mutex);
	return err;

done:
	if (phys64)
		map_bh(bh_result, sb, phys64 + frag);
	return 0;
}
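/*
 * Note (editorial): for a depth > 1 mapping the branch above allocates
 * top-down — first the block pointed to from the inode itself
 * (ufs_inode_getfrag), then one indirect level per loop iteration
 * (ufs_inode_getblock); only the final, leaf-level call is allowed to
 * report a brand-new buffer through "new".
 */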
static int ufs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, ufs_getfrag_block, wbc);
}
static int ufs_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, ufs_getfrag_block);
}
int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
{
	return __block_write_begin(page, pos, len, ufs_getfrag_block);
}
static void ufs_truncate_blocks(struct inode *);

static void ufs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		ufs_truncate_blocks(inode);
	}
}
static int ufs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len,
			struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, pagep, ufs_getfrag_block);
	if (unlikely(ret))
		ufs_write_failed(mapping, pos + len);

	return ret;
}
static int ufs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int ret;

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (ret < len)
		ufs_write_failed(mapping, pos + len);
	return ret;
}
static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, ufs_getfrag_block);
}
const struct address_space_operations ufs_aops = {
	.dirty_folio = block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio = ufs_read_folio,
	.writepage = ufs_writepage,
	.write_begin = ufs_write_begin,
	.write_end = ufs_write_end,
	.bmap = ufs_bmap
};
static void ufs_set_inode_ops(struct inode *inode)
{
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ufs_file_inode_operations;
		inode->i_fop = &ufs_file_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ufs_dir_inode_operations;
		inode->i_fop = &ufs_dir_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (!inode->i_blocks) {
			inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink;
			inode->i_op = &simple_symlink_inode_operations;
		} else {
			inode->i_mapping->a_ops = &ufs_aops;
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
		}
	} else
		init_special_inode(inode, inode->i_mode,
				   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
}
static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
	if (inode->i_nlink == 0)
		return -ESTALE;

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode));
	i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));

	inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
	inode->i_atime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
	inode->i_ctime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
	inode->i_mtime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
	inode->i_mtime.tv_nsec = 0;
	inode->i_atime.tv_nsec = 0;
	inode->i_ctime.tv_nsec = 0;
	inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);

	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}
static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
	if (inode->i_nlink == 0)
		return -ESTALE;

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid));
	i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid));

	inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
	inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime);
	inode->i_ctime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_ctime);
	inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime);
	inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec);
	inode->i_ctime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_ctimensec);
	inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec);
	inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
	/*
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
	*/

	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
		       sizeof(ufs2_inode->ui_u2.ui_addr));
	} else {
		memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
		       sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}
struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
{
	struct ufs_inode_info *ufsi;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head *bh;
	struct inode *inode;
	int err = -EIO;

	UFSD("ENTER, ino %lu\n", ino);

	if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
			    ino);
		return ERR_PTR(-EIO);
	}

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ufsi = UFS_I(inode);

	bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
			    inode->i_ino);
		goto bad_inode;
	}
	if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		err = ufs2_read_inode(inode,
				      ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;

		err = ufs1_read_inode(inode,
				      ufs_inode + ufs_inotofsbo(inode->i_ino));
	}
	brelse(bh);
	if (err)
		goto bad_inode;

	inode_inc_iversion(inode);
	ufsi->i_lastfrag =
		(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
	ufsi->i_dir_start_lookup = 0;
	ufsi->i_osync = 0;

	ufs_set_inode_ops(inode);

	UFSD("EXIT\n");
	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(err);
}
static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode));
	ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
	ufs_inode->ui_atime.tv_usec = 0;
	ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec);
	ufs_inode->ui_ctime.tv_usec = 0;
	ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
	ufs_inode->ui_mtime.tv_usec = 0;
	ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) {
		ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
		ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
	}

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset (ufs_inode, 0, sizeof(struct ufs_inode));
}
static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	UFSD("ENTER\n");
	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode));
	ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec);
	ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec);
	ufs_inode->ui_ctime = cpu_to_fs64(sb, inode->i_ctime.tv_sec);
	ufs_inode->ui_ctimensec = cpu_to_fs32(sb, inode->i_ctime.tv_nsec);
	ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec);
	ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec);

	ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset (ufs_inode, 0, sizeof(struct ufs2_inode));
	UFSD("EXIT\n");
}
static int ufs_update_inode(struct inode * inode, int do_sync)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head *bh;

	UFSD("ENTER, ino %lu\n", inode->i_ino);

	if (inode->i_ino < UFS_ROOTINO ||
	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
		return -1;
	}

	bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
		return -1;
	}
	if (uspi->fs_magic == UFS2_MAGIC) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		ufs2_update_inode(inode,
				  ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *) bh->b_data;

		ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
	}

	mark_buffer_dirty(bh);
	if (do_sync)
		sync_dirty_buffer(bh);
	brelse (bh);

	UFSD("EXIT\n");
	return 0;
}
int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}
int ufs_sync_inode (struct inode *inode)
{
	return ufs_update_inode (inode, 1);
}
void ufs_evict_inode(struct inode * inode)
{
	int want_delete = 0;

	if (!inode->i_nlink && !is_bad_inode(inode))
		want_delete = 1;

	truncate_inode_pages_final(&inode->i_data);
	if (want_delete) {
		inode->i_size = 0;
		if (inode->i_blocks &&
		    (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		     S_ISLNK(inode->i_mode)))
			ufs_truncate_blocks(inode);
		ufs_update_inode(inode, inode_needs_sync(inode));
	}

	invalidate_inode_buffers(inode);
	clear_inode(inode);

	if (want_delete)
		ufs_free_inode(inode);
}
struct to_free {
	struct inode *inode;
	u64 to;
	unsigned count;
};

static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
{
	if (ctx->count && ctx->to != from) {
		ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count);
		ctx->count = 0;
	}
	ctx->count += count;
	ctx->to = from + count;
}
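/*
 * Note (editorial): free_data() batches physically contiguous runs — it
 * only hands the pending run to ufs_free_blocks() once the next range is
 * discontiguous with it.  Callers flush the final pending run by passing
 * (from = 0, count = 0), as the truncate paths below do.
 */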
#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)
static void ufs_trunc_direct(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	void *p;
	u64 frag1, frag2, frag3, frag4, block1, block2;
	struct to_free ctx = {.inode = inode};
	unsigned i, tmp;

	UFSD("ENTER: ino %lu\n", inode->i_ino);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	frag1 = DIRECT_FRAGMENT;
	frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
	frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
	frag3 = frag4 & ~uspi->s_fpbmask;
	block1 = block2 = 0;
	if (frag2 > frag3) {
		frag2 = frag4;
		frag3 = frag4 = 0;
	} else if (frag2 < frag3) {
		block1 = ufs_fragstoblks (frag2);
		block2 = ufs_fragstoblks (frag3);
	}

	UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu,"
	     " frag3 %llu, frag4 %llu\n", inode->i_ino,
	     (unsigned long long)frag1, (unsigned long long)frag2,
	     (unsigned long long)block1, (unsigned long long)block2,
	     (unsigned long long)frag3, (unsigned long long)frag4);

	if (frag1 >= frag2)
		goto next1;

	/*
	 * Free first free fragments
	 */
	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1));
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		ufs_panic (sb, "ufs_trunc_direct", "internal error");
	frag2 -= frag1;
	frag1 = ufs_fragnum (frag1);

	ufs_free_fragments(inode, tmp + frag1, frag2);

next1:
	/*
	 * Free whole blocks
	 */
	for (i = block1 ; i < block2; i++) {
		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
		tmp = ufs_data_ptr_to_cpu(sb, p);
		if (!tmp)
			continue;
		write_seqlock(&ufsi->meta_lock);
		ufs_data_ptr_clear(uspi, p);
		write_sequnlock(&ufsi->meta_lock);

		free_data(&ctx, tmp, uspi->s_fpb);
	}

	free_data(&ctx, 0, 0);

	if (frag3 >= frag4)
		goto next3;

	/*
	 * Free last free fragments
	 */
	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3));
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		ufs_panic(sb, "ufs_truncate_direct", "internal error");
	frag4 = ufs_fragnum (frag4);
	write_seqlock(&ufsi->meta_lock);
	ufs_data_ptr_clear(uspi, p);
	write_sequnlock(&ufsi->meta_lock);

	ufs_free_fragments (inode, tmp, frag4);
next3:

	UFSD("EXIT: ino %lu\n", inode->i_ino);
}
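/*
 * Example of the frag1..frag4 split above (editorial; illustrative
 * s_fpb = 8, so UFS_NDIR_FRAGMENT = 96): truncating to
 * DIRECT_FRAGMENT = 13 with i_lastfrag = 29 gives frag1 = 13,
 * frag2 = 16, frag3 = 24, frag4 = 29.  Fragments 13-15 (the old tail of
 * block 1) go through ufs_free_fragments(), the whole block covering
 * fragments 16-23 through free_data()/ufs_free_blocks(), and the
 * trailing partial block's fragments 24-28 are freed last.
 */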
static void free_full_branch(struct inode *inode, u64 ind_block, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_buffer_head *ubh = ubh_bread(sb, ind_block, uspi->s_bsize);
	unsigned i;

	if (!ubh)
		return;

	if (--depth) {
		for (i = 0; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block)
				free_full_branch(inode, block, depth);
		}
	} else {
		struct to_free ctx = {.inode = inode};

		for (i = 0; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block)
				free_data(&ctx, block, uspi->s_fpb);
		}
		free_data(&ctx, 0, 0);
	}

	ubh_bforget(ubh);
	ufs_free_blocks(inode, ind_block, uspi->s_fpb);
}
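/*
 * Note (editorial): free_full_branch() works post-order — with depth to
 * spare it recurses into every child first, otherwise it frees the data
 * blocks the leaf points at; only then is the indirect block itself
 * forgotten (ubh_bforget) and released back to the allocator.
 */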
static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i;

	if (--depth) {
		for (i = from; i < uspi->s_apb ; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block) {
				write_seqlock(&UFS_I(inode)->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&UFS_I(inode)->meta_lock);
				ubh_mark_buffer_dirty(ubh);
				free_full_branch(inode, block, depth);
			}
		}
	} else {
		struct to_free ctx = {.inode = inode};

		for (i = from; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block) {
				write_seqlock(&UFS_I(inode)->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&UFS_I(inode)->meta_lock);
				ubh_mark_buffer_dirty(ubh);
				free_data(&ctx, block, uspi->s_fpb);
			}
		}
		free_data(&ctx, 0, 0);
	}
	if (IS_SYNC(inode) && ubh_buffer_dirty(ubh))
		ubh_sync_block(ubh);
	ubh_brelse(ubh);
}
static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
{
	int err = 0;
	struct super_block *sb = inode->i_sb;
	struct address_space *mapping = inode->i_mapping;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i, end;
	sector_t lastfrag;
	struct page *lastpage;
	struct buffer_head *bh;
	u64 phys64;

	lastfrag = (size + uspi->s_fsize - 1) >> uspi->s_fshift;

	if (!lastfrag)
		goto out;

	lastfrag--;

	lastpage = ufs_get_locked_page(mapping, lastfrag >>
				       (PAGE_SHIFT - inode->i_blkbits));
	if (IS_ERR(lastpage)) {
		err = -EIO;
		goto out;
	}

	end = lastfrag & ((1 << (PAGE_SHIFT - inode->i_blkbits)) - 1);
	bh = page_buffers(lastpage);
	for (i = 0; i < end; ++i)
		bh = bh->b_this_page;

	err = ufs_getfrag_block(inode, lastfrag, bh, 1);

	if (unlikely(err))
		goto out_unlock;

	if (buffer_new(bh)) {
		clear_buffer_new(bh);
		clean_bdev_bh_alias(bh);
		/*
		 * we do not zero the fragment: if it is mapped to a
		 * hole, it already contains zeroes
		 */
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		set_page_dirty(lastpage);
	}

	if (lastfrag >= UFS_IND_FRAGMENT) {
		end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1;
		phys64 = bh->b_blocknr + 1;
		for (i = 0; i < end; ++i) {
			bh = sb_getblk(sb, i + phys64);
			lock_buffer(bh);
			memset(bh->b_data, 0, sb->s_blocksize);
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
			unlock_buffer(bh);
			sync_dirty_buffer(bh);
			brelse(bh);
		}
	}
out_unlock:
	ufs_put_locked_page(lastpage);
out:
	return err;
}
static void ufs_truncate_blocks(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned offsets[4];
	int depth;
	int depth2;
	unsigned i;
	struct ufs_buffer_head *ubh[3];
	void *p;
	u64 block;

	if (inode->i_size) {
		sector_t last = (inode->i_size - 1) >> uspi->s_bshift;
		depth = ufs_block_to_path(inode, last, offsets);
		if (!depth)
			return;
	} else {
		depth = 1;
	}

	for (depth2 = depth - 1; depth2; depth2--)
		if (offsets[depth2] != uspi->s_apb - 1)
			break;

	mutex_lock(&ufsi->truncate_mutex);
	if (depth == 1) {
		ufs_trunc_direct(inode);
		offsets[0] = UFS_IND_BLOCK;
	} else {
		/* get the blocks that should be partially emptied */
		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]++);
		for (i = 0; i < depth2; i++) {
			block = ufs_data_ptr_to_cpu(sb, p);
			if (!block)
				break;
			ubh[i] = ubh_bread(sb, block, uspi->s_bsize);
			if (!ubh[i]) {
				write_seqlock(&ufsi->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&ufsi->meta_lock);
				break;
			}
			p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]++);
		}
		while (i--)
			free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
	}
	for (i = offsets[0]; i <= UFS_TIND_BLOCK; i++) {
		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
		block = ufs_data_ptr_to_cpu(sb, p);
		if (block) {
			write_seqlock(&ufsi->meta_lock);
			ufs_data_ptr_clear(uspi, p);
			write_sequnlock(&ufsi->meta_lock);
			free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
		}
	}
	read_seqlock_excl(&ufsi->meta_lock);
	ufsi->i_lastfrag = DIRECT_FRAGMENT;
	read_sequnlock_excl(&ufsi->meta_lock);
	mark_inode_dirty(inode);
	mutex_unlock(&ufsi->truncate_mutex);
}
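/*
 * Note on depth2 above (editorial): the loop walks the path bottom-up
 * and stops at the deepest component that is not the last pointer of its
 * indirect block, so only that many levels need to be partially emptied
 * with free_branch_tail(); every remaining subtree, from offsets[0] up
 * to the triple-indirect slot, is then torn down whole with
 * free_full_branch().
 */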
static int ufs_truncate(struct inode *inode, loff_t size)
{
	int err = 0;

	UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n",
	     inode->i_ino, (unsigned long long)size,
	     (unsigned long long)i_size_read(inode));

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	err = ufs_alloc_lastblock(inode, size);
	if (err)
		goto out;

	block_truncate_page(inode->i_mapping, size, ufs_getfrag_block);

	truncate_setsize(inode, size);

	ufs_truncate_blocks(inode);
	inode->i_mtime = inode->i_ctime = current_time(inode);
	mark_inode_dirty(inode);
out:
	UFSD("EXIT: err %d\n", err);
	return err;
}
int ufs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	unsigned int ia_valid = attr->ia_valid;
	int error;

	error = setattr_prepare(&init_user_ns, dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
		error = ufs_truncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(&init_user_ns, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}
const struct inode_operations ufs_file_inode_operations = {
	.setattr = ufs_setattr,
};