1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
4 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
7 #include <linux/spinlock.h>
8 #include <linux/completion.h>
9 #include <linux/buffer_head.h>
10 #include <linux/blkdev.h>
11 #include <linux/gfs2_ondisk.h>
12 #include <linux/crc32.h>
13 #include <linux/iomap.h>
14 #include <linux/ktime.h>
30 #include "trace_gfs2.h"
32 /* This doesn't need to be that large as max 64 bit pointers in a 4k
33 * block is 512, so __u16 is fine for that. It saves stack space to
37 struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT];
38 __u16 mp_list[GFS2_MAX_META_HEIGHT];
39 int mp_fheight; /* find_metapath height */
40 int mp_aheight; /* actual height (lookup height) */
43 static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length);
46 * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
48 * @dibh: the dinode buffer
49 * @block: the block number that was allocated
50 * @page: The (optional) page. This is looked up if @page is NULL
55 static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
56 u64 block, struct page *page)
58 struct inode *inode = &ip->i_inode;
61 if (!page || page->index) {
62 page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
68 if (!PageUptodate(page)) {
69 void *kaddr = kmap(page);
70 u64 dsize = i_size_read(inode);
72 memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
73 memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
76 SetPageUptodate(page);
79 if (gfs2_is_jdata(ip)) {
80 struct buffer_head *bh;
82 if (!page_has_buffers(page))
83 create_empty_buffers(page, BIT(inode->i_blkbits),
86 bh = page_buffers(page);
87 if (!buffer_mapped(bh))
88 map_bh(bh, inode->i_sb, block);
90 set_buffer_uptodate(bh);
91 gfs2_trans_add_data(ip->i_gl, bh);
94 gfs2_ordered_add_inode(ip);
106 * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
107 * @ip: The GFS2 inode to unstuff
108 * @page: The (optional) page. This is looked up if the @page is NULL
110 * This routine unstuffs a dinode and returns it to a "normal" state such
111 * that the height can be grown in the traditional way.
116 int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
118 struct buffer_head *bh, *dibh;
119 struct gfs2_dinode *di;
121 int isdir = gfs2_is_dir(ip);
124 down_write(&ip->i_rw_mutex);
126 error = gfs2_meta_inode_buffer(ip, &dibh);
130 if (i_size_read(&ip->i_inode)) {
131 /* Get a free block, fill it with the stuffed data,
132 and write it out to disk */
135 error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
139 gfs2_trans_remove_revoke(GFS2_SB(&ip->i_inode), block, 1);
140 error = gfs2_dir_get_new_buffer(ip, block, &bh);
143 gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header),
144 dibh, sizeof(struct gfs2_dinode));
147 error = gfs2_unstuffer_page(ip, dibh, block, page);
153 /* Set up the pointer to the new block */
155 gfs2_trans_add_meta(ip->i_gl, dibh);
156 di = (struct gfs2_dinode *)dibh->b_data;
157 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
159 if (i_size_read(&ip->i_inode)) {
160 *(__be64 *)(di + 1) = cpu_to_be64(block);
161 gfs2_add_inode_blocks(&ip->i_inode, 1);
162 di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
166 di->di_height = cpu_to_be16(1);
171 up_write(&ip->i_rw_mutex);
177 * find_metapath - Find path through the metadata tree
178 * @sdp: The superblock
179 * @block: The disk block to look up
180 * @mp: The metapath to return the result in
181 * @height: The pre-calculated height of the metadata tree
183 * This routine returns a struct metapath structure that defines a path
184 * through the metadata of inode "ip" to get to block "block".
187 * Given: "ip" is a height 3 file, "offset" is 101342453, and this is a
188 * filesystem with a blocksize of 4096.
190 * find_metapath() would return a struct metapath structure set to:
191 * mp_fheight = 3, mp_list[0] = 0, mp_list[1] = 48, and mp_list[2] = 165.
193 * That means that in order to get to the block containing the byte at
194 * offset 101342453, we would load the indirect block pointed to by pointer
195 * 0 in the dinode. We would then load the indirect block pointed to by
196 * pointer 48 in that indirect block. We would then load the data block
197 * pointed to by pointer 165 in that indirect block.
 * (Diagram: dinode --pointer 0--> indirect block --pointer 48-->
 *  indirect block --pointer 165--> data block containing offset
 *  101342453.)
235 static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
236 struct metapath *mp, unsigned int height)
240 mp->mp_fheight = height;
241 for (i = height; i--;)
242 mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
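/*
 * Worked example (illustrative, matching the comment above): with
 * 4096-byte blocks and, for simplicity, 512 pointers per indirect
 * block, byte offset 101342453 is relative block 24741.  Repeated
 * division by 512 yields the remainders 165, 48 and 0, i.e.
 * mp_list[] = {0, 48, 165}.  (The real sd_inptrs is slightly smaller
 * than 512 because each indirect block also carries a header.)
 */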
245 static inline unsigned int metapath_branch_start(const struct metapath *mp)
247 if (mp->mp_list[0] == 0)
253 * metaptr1 - Return the first possible metadata pointer in a metapath buffer
254 * @height: The metadata height (0 = dinode)
257 static inline __be64 *metaptr1(unsigned int height, const struct metapath *mp)
259 struct buffer_head *bh = mp->mp_bh[height];
261 return ((__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)));
262 return ((__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header)));
266 * metapointer - Return pointer to start of metadata in a buffer
267 * @height: The metadata height (0 = dinode)
270 * Return a pointer to the block number of the next height of the metadata
271 * tree given a buffer containing the pointer to the current height of the
275 static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
277 __be64 *p = metaptr1(height, mp);
278 return p + mp->mp_list[height];
281 static inline const __be64 *metaend(unsigned int height, const struct metapath *mp)
283 const struct buffer_head *bh = mp->mp_bh[height];
284 return (const __be64 *)(bh->b_data + bh->b_size);
287 static void clone_metapath(struct metapath *clone, struct metapath *mp)
292 for (hgt = 0; hgt < mp->mp_aheight; hgt++)
293 get_bh(clone->mp_bh[hgt]);
296 static void gfs2_metapath_ra(struct gfs2_glock *gl, __be64 *start, __be64 *end)
300 for (t = start; t < end; t++) {
301 struct buffer_head *rabh;
306 rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE);
307 if (trylock_buffer(rabh)) {
308 if (!buffer_uptodate(rabh)) {
309 rabh->b_end_io = end_buffer_read_sync;
310 submit_bh(REQ_OP_READ,
311 REQ_RAHEAD | REQ_META | REQ_PRIO,
321 static int __fillup_metapath(struct gfs2_inode *ip, struct metapath *mp,
322 unsigned int x, unsigned int h)
325 __be64 *ptr = metapointer(x, mp);
326 u64 dblock = be64_to_cpu(*ptr);
331 ret = gfs2_meta_indirect_buffer(ip, x + 1, dblock, &mp->mp_bh[x + 1]);
335 mp->mp_aheight = x + 1;
340 * lookup_metapath - Walk the metadata tree to a specific point
344 * Assumes that the inode's buffer has already been looked up and
345 * hooked onto mp->mp_bh[0] and that the metapath has been initialised
346 * by find_metapath().
348 * If this function encounters part of the tree which has not been
349 * allocated, it returns the current height of the tree at the point
350 * at which it found the unallocated block. Blocks which are found are
351 * added to the mp->mp_bh[] list.
356 static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
358 return __fillup_metapath(ip, mp, 0, ip->i_height - 1);
362 * fillup_metapath - fill up buffers for the metadata path to a specific height
365 * @h: The height to which it should be mapped
367 * Similar to lookup_metapath, but does lookups for a range of heights
369 * Returns: error or the number of buffers filled
372 static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
378 /* find the first buffer we need to look up. */
379 for (x = h - 1; x > 0; x--) {
384 ret = __fillup_metapath(ip, mp, x, h);
387 return mp->mp_aheight - x - 1;
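/*
 * metapath_to_block() below is the inverse of find_metapath(): with the
 * same illustrative numbers as in the find_metapath() example (512
 * pointers per block), mp_list[] = {0, 48, 165} maps back to
 * 0*512*512 + 48*512 + 165 = 24741.  List entries at heights that were
 * never looked up (hgt >= mp_aheight) are treated as zero.
 */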
390 static sector_t metapath_to_block(struct gfs2_sbd *sdp, struct metapath *mp)
392 sector_t factor = 1, block = 0;
395 for (hgt = mp->mp_fheight - 1; hgt >= 0; hgt--) {
396 if (hgt < mp->mp_aheight)
397 block += mp->mp_list[hgt] * factor;
398 factor *= sdp->sd_inptrs;
403 static void release_metapath(struct metapath *mp)
407 for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
408 if (mp->mp_bh[i] == NULL)
410 brelse(mp->mp_bh[i]);
416 * gfs2_extent_length - Returns length of an extent of blocks
417 * @bh: The metadata block
418 * @ptr: Current position in @bh
419 * @limit: Max extent length to return
420 * @eob: Set to 1 if we hit "end of block"
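 *
 * Pointers belong to the same extent as long as each one is exactly one
 * block past its predecessor, e.g. 5000, 5001, 5002, ... (illustrative
 * numbers); the first pointer that breaks the run, @limit, or the end
 * of the buffer ends the extent.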
422 * Returns: The length of the extent (minimum of one block)
425 static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *ptr, size_t limit, int *eob)
427 const __be64 *end = (__be64 *)(bh->b_data + bh->b_size);
428 const __be64 *first = ptr;
429 u64 d = be64_to_cpu(*ptr);
437 } while(be64_to_cpu(*ptr) == d);
443 enum walker_status { WALK_STOP, WALK_FOLLOW, WALK_CONTINUE };
446 * gfs2_metadata_walker - walk an indirect block
447 * @mp: Metapath to indirect block
448 * @ptrs: Number of pointers to look at
450 * When returning WALK_FOLLOW, the walker must update @mp to point at the right
451 * indirect block to follow.
453 typedef enum walker_status (*gfs2_metadata_walker)(struct metapath *mp,
457 * gfs2_walk_metadata - walk a tree of indirect blocks
459 * @mp: Starting point of walk
460 * @max_len: Maximum number of blocks to walk
461 * @walker: Called during the walk
463 * Returns 1 if the walk was stopped by @walker, 0 if we went past @max_len or
464 * past the end of metadata, and a negative error code otherwise.
467 static int gfs2_walk_metadata(struct inode *inode, struct metapath *mp,
468 u64 max_len, gfs2_metadata_walker walker)
470 struct gfs2_inode *ip = GFS2_I(inode);
471 struct gfs2_sbd *sdp = GFS2_SB(inode);
477 * The walk starts in the lowest allocated indirect block, which may be
478 * before the position indicated by @mp. Adjust @max_len accordingly
479 * to avoid a short walk.
481 for (hgt = mp->mp_fheight - 1; hgt >= mp->mp_aheight; hgt--) {
482 max_len += mp->mp_list[hgt] * factor;
483 mp->mp_list[hgt] = 0;
484 factor *= sdp->sd_inptrs;
488 u16 start = mp->mp_list[hgt];
489 enum walker_status status;
493 /* Walk indirect block. */
494 ptrs = (hgt >= 1 ? sdp->sd_inptrs : sdp->sd_diptrs) - start;
497 ptrs = DIV_ROUND_UP_ULL(max_len, factor);
498 status = walker(mp, ptrs);
503 BUG_ON(mp->mp_aheight == mp->mp_fheight);
504 ptrs = mp->mp_list[hgt] - start;
513 if (status == WALK_FOLLOW)
514 goto fill_up_metapath;
517 /* Decrease height of metapath. */
518 brelse(mp->mp_bh[hgt]);
519 mp->mp_bh[hgt] = NULL;
520 mp->mp_list[hgt] = 0;
524 factor *= sdp->sd_inptrs;
526 /* Advance in metadata tree. */
527 (mp->mp_list[hgt])++;
529 if (mp->mp_list[hgt] >= sdp->sd_inptrs)
532 if (mp->mp_list[hgt] >= sdp->sd_diptrs)
537 /* Increase height of metapath. */
538 ret = fillup_metapath(ip, mp, ip->i_height - 1);
543 do_div(factor, sdp->sd_inptrs);
544 mp->mp_aheight = hgt + 1;
549 static enum walker_status gfs2_hole_walker(struct metapath *mp,
552 const __be64 *start, *ptr, *end;
555 hgt = mp->mp_aheight - 1;
556 start = metapointer(hgt, mp);
559 for (ptr = start; ptr < end; ptr++) {
561 mp->mp_list[hgt] += ptr - start;
562 if (mp->mp_aheight == mp->mp_fheight)
567 return WALK_CONTINUE;
571 * gfs2_hole_size - figure out the size of a hole
573 * @lblock: The logical starting block number
574 * @len: How far to look (in blocks)
575 * @mp: The metapath at lblock
576 * @iomap: The iomap to store the hole size in
578 * This function modifies @mp.
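 *
 * Example (illustrative numbers): if the walk from @lblock first finds
 * an allocated block that maps back to @lblock + 128, the hole spans
 * 128 blocks and iomap->length is set to 128 << inode->i_blkbits bytes.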
580 * Returns: errno on error
582 static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len,
583 struct metapath *mp, struct iomap *iomap)
585 struct metapath clone;
589 clone_metapath(&clone, mp);
590 ret = gfs2_walk_metadata(inode, &clone, len, gfs2_hole_walker);
595 hole_size = metapath_to_block(GFS2_SB(inode), &clone) - lblock;
598 iomap->length = hole_size << inode->i_blkbits;
602 release_metapath(&clone);
606 static inline __be64 *gfs2_indirect_init(struct metapath *mp,
607 struct gfs2_glock *gl, unsigned int i,
608 unsigned offset, u64 bn)
610 __be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
611 ((i > 1) ? sizeof(struct gfs2_meta_header) :
612 sizeof(struct gfs2_dinode)));
614 BUG_ON(mp->mp_bh[i] != NULL);
615 mp->mp_bh[i] = gfs2_meta_new(gl, bn);
616 gfs2_trans_add_meta(gl, mp->mp_bh[i]);
617 gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
618 gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
620 *ptr = cpu_to_be64(bn);
626 ALLOC_GROW_DEPTH = 1,
627 ALLOC_GROW_HEIGHT = 2,
628 /* ALLOC_UNSTUFF = 3, TBD and rather complicated */
632 * gfs2_iomap_alloc - Build a metadata tree of the requested height
633 * @inode: The GFS2 inode
634 * @iomap: The iomap structure
635 * @mp: The metapath, with proper height information calculated
637 * In this routine we may have to alloc:
638 * i) Indirect blocks to grow the metadata tree height
639 * ii) Indirect blocks to fill in lower part of the metadata tree
642 * This function is called after gfs2_iomap_get, which works out the
643 * total number of blocks which we need via gfs2_alloc_size.
645 * We then do the actual allocation asking for an extent at a time (if
646 * enough contiguous free blocks are available, there will only be one
 * allocation request per call) and use the state machine to initialise
648 * the blocks in order.
650 * Right now, this function will allocate at most one indirect block
651 * worth of data -- with a default block size of 4K, that's slightly
652 * less than 2M. If this limitation is ever removed to allow huge
653 * allocations, we would probably still want to limit the iomap size we
654 * return to avoid stalling other tasks during huge writes; the next
655 * iomap iteration would then find the blocks already allocated.
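 *
 * Illustrative example (hypothetical numbers): a 16k write into a hole
 * on a 4k-block filesystem needs dblks = 4 data blocks; if one new
 * indirect block is also required to reach the bottom of the tree,
 * blks = 5 blocks are requested, ideally in a single extent from
 * gfs2_alloc_blocks().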
657 * Returns: errno on error
660 static int gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
663 struct gfs2_inode *ip = GFS2_I(inode);
664 struct gfs2_sbd *sdp = GFS2_SB(inode);
665 struct buffer_head *dibh = mp->mp_bh[0];
667 unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
668 size_t dblks = iomap->length >> inode->i_blkbits;
669 const unsigned end_of_metadata = mp->mp_fheight - 1;
671 enum alloc_state state;
675 BUG_ON(mp->mp_aheight < 1);
676 BUG_ON(dibh == NULL);
679 gfs2_trans_add_meta(ip->i_gl, dibh);
681 down_write(&ip->i_rw_mutex);
683 if (mp->mp_fheight == mp->mp_aheight) {
684 /* Bottom indirect block exists */
687 /* Need to allocate indirect blocks */
688 if (mp->mp_fheight == ip->i_height) {
689 /* Writing into existing tree, extend tree down */
690 iblks = mp->mp_fheight - mp->mp_aheight;
691 state = ALLOC_GROW_DEPTH;
693 /* Building up tree height */
694 state = ALLOC_GROW_HEIGHT;
695 iblks = mp->mp_fheight - ip->i_height;
696 branch_start = metapath_branch_start(mp);
697 iblks += (mp->mp_fheight - branch_start);
701 /* start of the second part of the function (state machine) */
703 blks = dblks + iblks;
707 ret = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
711 if (state != ALLOC_DATA || gfs2_is_jdata(ip))
712 gfs2_trans_remove_revoke(sdp, bn, n);
714 /* Growing height of tree */
715 case ALLOC_GROW_HEIGHT:
717 ptr = (__be64 *)(dibh->b_data +
718 sizeof(struct gfs2_dinode));
721 for (; i - 1 < mp->mp_fheight - ip->i_height && n > 0;
723 gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
724 if (i - 1 == mp->mp_fheight - ip->i_height) {
726 gfs2_buffer_copy_tail(mp->mp_bh[i],
727 sizeof(struct gfs2_meta_header),
728 dibh, sizeof(struct gfs2_dinode));
729 gfs2_buffer_clear_tail(dibh,
730 sizeof(struct gfs2_dinode) +
732 ptr = (__be64 *)(mp->mp_bh[i]->b_data +
733 sizeof(struct gfs2_meta_header));
735 state = ALLOC_GROW_DEPTH;
736 for(i = branch_start; i < mp->mp_fheight; i++) {
737 if (mp->mp_bh[i] == NULL)
739 brelse(mp->mp_bh[i]);
746 fallthrough; /* To branching from existing tree */
747 case ALLOC_GROW_DEPTH:
748 if (i > 1 && i < mp->mp_fheight)
749 gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]);
750 for (; i < mp->mp_fheight && n > 0; i++, n--)
751 gfs2_indirect_init(mp, ip->i_gl, i,
752 mp->mp_list[i-1], bn++);
753 if (i == mp->mp_fheight)
757 fallthrough; /* To tree complete, adding data blocks */
760 BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
761 gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]);
763 ptr = metapointer(end_of_metadata, mp);
764 iomap->addr = bn << inode->i_blkbits;
765 iomap->flags |= IOMAP_F_MERGED | IOMAP_F_NEW;
767 *ptr++ = cpu_to_be64(bn++);
770 } while (iomap->addr == IOMAP_NULL_ADDR);
772 iomap->type = IOMAP_MAPPED;
773 iomap->length = (u64)dblks << inode->i_blkbits;
774 ip->i_height = mp->mp_fheight;
775 gfs2_add_inode_blocks(&ip->i_inode, alloced);
776 gfs2_dinode_out(ip, dibh->b_data);
778 up_write(&ip->i_rw_mutex);
782 #define IOMAP_F_GFS2_BOUNDARY IOMAP_F_PRIVATE
785 * gfs2_alloc_size - Compute the maximum allocation size
788 * @size: Requested size in blocks
790 * Compute the maximum size of the next allocation at @mp.
792 * Returns: size in blocks
794 static u64 gfs2_alloc_size(struct inode *inode, struct metapath *mp, u64 size)
796 struct gfs2_inode *ip = GFS2_I(inode);
797 struct gfs2_sbd *sdp = GFS2_SB(inode);
798 const __be64 *first, *ptr, *end;
801 * For writes to stuffed files, this function is called twice via
802 * gfs2_iomap_get, before and after unstuffing. The size we return the
803 * first time needs to be large enough to get the reservation and
804 * allocation sizes right. The size we return the second time must
805 * be exact or else gfs2_iomap_alloc won't do the right thing.
808 if (gfs2_is_stuffed(ip) || mp->mp_fheight != mp->mp_aheight) {
809 unsigned int maxsize = mp->mp_fheight > 1 ?
810 sdp->sd_inptrs : sdp->sd_diptrs;
811 maxsize -= mp->mp_list[mp->mp_fheight - 1];
817 first = metapointer(ip->i_height - 1, mp);
818 end = metaend(ip->i_height - 1, mp);
819 if (end - first > size)
821 for (ptr = first; ptr < end; ptr++) {
829 * gfs2_iomap_get - Map blocks from an inode to disk blocks
831 * @pos: Starting position in bytes
832 * @length: Length to map, in bytes
833 * @flags: iomap flags
834 * @iomap: The iomap structure
839 static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
840 unsigned flags, struct iomap *iomap,
843 struct gfs2_inode *ip = GFS2_I(inode);
844 struct gfs2_sbd *sdp = GFS2_SB(inode);
845 loff_t size = i_size_read(inode);
848 sector_t lblock_stop;
852 struct buffer_head *dibh = NULL, *bh;
858 down_read(&ip->i_rw_mutex);
860 ret = gfs2_meta_inode_buffer(ip, &dibh);
865 if (gfs2_is_stuffed(ip)) {
866 if (flags & IOMAP_WRITE) {
867 loff_t max_size = gfs2_max_stuffed_size(ip);
869 if (pos + length > max_size)
871 iomap->length = max_size;
874 if (flags & IOMAP_REPORT) {
879 iomap->length = length;
883 iomap->length = size;
885 iomap->addr = (ip->i_no_addr << inode->i_blkbits) +
886 sizeof(struct gfs2_dinode);
887 iomap->type = IOMAP_INLINE;
888 iomap->inline_data = dibh->b_data + sizeof(struct gfs2_dinode);
893 lblock = pos >> inode->i_blkbits;
894 iomap->offset = lblock << inode->i_blkbits;
895 lblock_stop = (pos + length - 1) >> inode->i_blkbits;
896 len = lblock_stop - lblock + 1;
897 iomap->length = len << inode->i_blkbits;
899 height = ip->i_height;
900 while ((lblock + 1) * sdp->sd_sb.sb_bsize > sdp->sd_heightsize[height])
902 find_metapath(sdp, lblock, mp, height);
903 if (height > ip->i_height || gfs2_is_stuffed(ip))
906 ret = lookup_metapath(ip, mp);
910 if (mp->mp_aheight != ip->i_height)
913 ptr = metapointer(ip->i_height - 1, mp);
917 bh = mp->mp_bh[ip->i_height - 1];
918 len = gfs2_extent_length(bh, ptr, len, &eob);
920 iomap->addr = be64_to_cpu(*ptr) << inode->i_blkbits;
921 iomap->length = len << inode->i_blkbits;
922 iomap->type = IOMAP_MAPPED;
923 iomap->flags |= IOMAP_F_MERGED;
925 iomap->flags |= IOMAP_F_GFS2_BOUNDARY;
928 iomap->bdev = inode->i_sb->s_bdev;
930 up_read(&ip->i_rw_mutex);
934 if (flags & IOMAP_REPORT) {
937 else if (height == ip->i_height)
938 ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
940 iomap->length = size - iomap->offset;
941 } else if (flags & IOMAP_WRITE) {
944 if (flags & IOMAP_DIRECT)
945 goto out; /* (see gfs2_file_direct_write) */
947 len = gfs2_alloc_size(inode, mp, len);
948 alloc_size = len << inode->i_blkbits;
949 if (alloc_size < iomap->length)
950 iomap->length = alloc_size;
952 if (pos < size && height == ip->i_height)
953 ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
956 iomap->addr = IOMAP_NULL_ADDR;
957 iomap->type = IOMAP_HOLE;
962 * gfs2_lblk_to_dblk - convert logical block to disk block
963 * @inode: the inode of the file we're mapping
964 * @lblock: the block relative to the start of the file
965 * @dblock: the returned dblock, if no error
967 * This function maps a single block from a file logical block (relative to
968 * the start of the file) to a file system absolute block using iomap.
 * Returns: 0 on success, with the absolute file system block stored in
 * @dblock, or an error
972 int gfs2_lblk_to_dblk(struct inode *inode, u32 lblock, u64 *dblock)
974 struct iomap iomap = { };
975 struct metapath mp = { .mp_aheight = 1, };
976 loff_t pos = (loff_t)lblock << inode->i_blkbits;
979 ret = gfs2_iomap_get(inode, pos, i_blocksize(inode), 0, &iomap, &mp);
980 release_metapath(&mp);
982 *dblock = iomap.addr >> inode->i_blkbits;
987 static int gfs2_write_lock(struct inode *inode)
989 struct gfs2_inode *ip = GFS2_I(inode);
990 struct gfs2_sbd *sdp = GFS2_SB(inode);
993 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
994 error = gfs2_glock_nq(&ip->i_gh);
997 if (&ip->i_inode == sdp->sd_rindex) {
998 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
1000 error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
1001 GL_NOCACHE, &m_ip->i_gh);
1008 gfs2_glock_dq(&ip->i_gh);
1010 gfs2_holder_uninit(&ip->i_gh);
1014 static void gfs2_write_unlock(struct inode *inode)
1016 struct gfs2_inode *ip = GFS2_I(inode);
1017 struct gfs2_sbd *sdp = GFS2_SB(inode);
1019 if (&ip->i_inode == sdp->sd_rindex) {
1020 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
1022 gfs2_glock_dq_uninit(&m_ip->i_gh);
1024 gfs2_glock_dq_uninit(&ip->i_gh);
1027 static int gfs2_iomap_page_prepare(struct inode *inode, loff_t pos,
1028 unsigned len, struct iomap *iomap)
1030 unsigned int blockmask = i_blocksize(inode) - 1;
1031 struct gfs2_sbd *sdp = GFS2_SB(inode);
1032 unsigned int blocks;
1034 blocks = ((pos & blockmask) + len + blockmask) >> inode->i_blkbits;
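	/*
	 * Example (illustrative): with 4096-byte blocks, pos = 100 and
	 * len = 5000 give blocks = (100 + 5000 + 4095) >> 12 = 2, so the
	 * transaction below reserves RES_DINODE + 2 blocks.
	 */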
1035 return gfs2_trans_begin(sdp, RES_DINODE + blocks, 0);
1038 static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
1039 unsigned copied, struct page *page,
1040 struct iomap *iomap)
1042 struct gfs2_trans *tr = current->journal_info;
1043 struct gfs2_inode *ip = GFS2_I(inode);
1044 struct gfs2_sbd *sdp = GFS2_SB(inode);
1046 if (page && !gfs2_is_stuffed(ip))
1047 gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);
1049 if (tr->tr_num_buf_new)
1050 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1052 gfs2_trans_end(sdp);
1055 static const struct iomap_page_ops gfs2_iomap_page_ops = {
1056 .page_prepare = gfs2_iomap_page_prepare,
1057 .page_done = gfs2_iomap_page_done,
1060 static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
1061 loff_t length, unsigned flags,
1062 struct iomap *iomap,
1063 struct metapath *mp)
1065 struct gfs2_inode *ip = GFS2_I(inode);
1066 struct gfs2_sbd *sdp = GFS2_SB(inode);
1070 unstuff = gfs2_is_stuffed(ip) &&
1071 pos + length > gfs2_max_stuffed_size(ip);
1073 if (unstuff || iomap->type == IOMAP_HOLE) {
1074 unsigned int data_blocks, ind_blocks;
1075 struct gfs2_alloc_parms ap = {};
1076 unsigned int rblocks;
1077 struct gfs2_trans *tr;
1079 gfs2_write_calc_reserv(ip, iomap->length, &data_blocks,
1081 ap.target = data_blocks + ind_blocks;
1082 ret = gfs2_quota_lock_check(ip, &ap);
1086 ret = gfs2_inplace_reserve(ip, &ap);
1090 rblocks = RES_DINODE + ind_blocks;
1091 if (gfs2_is_jdata(ip))
1092 rblocks += data_blocks;
1093 if (ind_blocks || data_blocks)
1094 rblocks += RES_STATFS + RES_QUOTA;
1095 if (inode == sdp->sd_rindex)
1096 rblocks += 2 * RES_STATFS;
1097 rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
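		/*
		 * Illustration (hypothetical numbers): an ordered-mode 8k
		 * write needing 2 data blocks and no new indirect blocks
		 * reserves RES_DINODE + RES_STATFS + RES_QUOTA plus
		 * whatever gfs2_rg_blocks() reports for the resource group
		 * bitmaps; jdata writes additionally reserve the data
		 * blocks themselves.
		 */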
1099 ret = gfs2_trans_begin(sdp, rblocks,
1100 iomap->length >> inode->i_blkbits);
1102 goto out_trans_fail;
1105 ret = gfs2_unstuff_dinode(ip, NULL);
1108 release_metapath(mp);
1109 ret = gfs2_iomap_get(inode, iomap->offset,
1110 iomap->length, flags, iomap, mp);
1115 if (iomap->type == IOMAP_HOLE) {
1116 ret = gfs2_iomap_alloc(inode, iomap, mp);
1118 gfs2_trans_end(sdp);
1119 gfs2_inplace_release(ip);
1120 punch_hole(ip, iomap->offset, iomap->length);
1125 tr = current->journal_info;
1126 if (tr->tr_num_buf_new)
1127 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1129 gfs2_trans_end(sdp);
1132 if (gfs2_is_stuffed(ip) || gfs2_is_jdata(ip))
1133 iomap->page_ops = &gfs2_iomap_page_ops;
1137 gfs2_trans_end(sdp);
1139 gfs2_inplace_release(ip);
1141 gfs2_quota_unlock(ip);
1145 static inline bool gfs2_iomap_need_write_lock(unsigned flags)
1147 return (flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT);
1150 static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
1151 unsigned flags, struct iomap *iomap,
1152 struct iomap *srcmap)
1154 struct gfs2_inode *ip = GFS2_I(inode);
1155 struct metapath mp = { .mp_aheight = 1, };
1158 if (gfs2_is_jdata(ip))
1159 iomap->flags |= IOMAP_F_BUFFER_HEAD;
1161 trace_gfs2_iomap_start(ip, pos, length, flags);
1162 if (gfs2_iomap_need_write_lock(flags)) {
1163 ret = gfs2_write_lock(inode);
1168 ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
1172 switch(flags & (IOMAP_WRITE | IOMAP_ZERO)) {
1174 if (flags & IOMAP_DIRECT) {
1176 * Silently fall back to buffered I/O for stuffed files
1177 * or if we've got a hole (see gfs2_file_direct_write).
1179 if (iomap->type != IOMAP_MAPPED)
1185 if (iomap->type == IOMAP_HOLE)
1192 ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap, &mp);
1195 if (ret && gfs2_iomap_need_write_lock(flags))
1196 gfs2_write_unlock(inode);
1197 release_metapath(&mp);
1199 trace_gfs2_iomap_end(ip, iomap, ret);
1203 static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
1204 ssize_t written, unsigned flags, struct iomap *iomap)
1206 struct gfs2_inode *ip = GFS2_I(inode);
1207 struct gfs2_sbd *sdp = GFS2_SB(inode);
1209 switch (flags & (IOMAP_WRITE | IOMAP_ZERO)) {
1211 if (flags & IOMAP_DIRECT)
1215 if (iomap->type == IOMAP_HOLE)
1222 if (!gfs2_is_stuffed(ip))
1223 gfs2_ordered_add_inode(ip);
1225 if (inode == sdp->sd_rindex)
1226 adjust_fs_space(inode);
1228 gfs2_inplace_release(ip);
1230 if (ip->i_qadata && ip->i_qadata->qa_qd_num)
1231 gfs2_quota_unlock(ip);
1233 if (length != written && (iomap->flags & IOMAP_F_NEW)) {
1234 /* Deallocate blocks that were just allocated. */
1235 loff_t hstart = round_up(pos + written, i_blocksize(inode));
1236 loff_t hend = iomap->offset + iomap->length;
1238 if (hstart < hend) {
1239 truncate_pagecache_range(inode, hstart, hend - 1);
1240 punch_hole(ip, hstart, hend - hstart);
1244 if (unlikely(!written))
1247 if (iomap->flags & IOMAP_F_SIZE_CHANGED)
1248 mark_inode_dirty(inode);
1249 set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
1252 if (gfs2_iomap_need_write_lock(flags))
1253 gfs2_write_unlock(inode);
1257 const struct iomap_ops gfs2_iomap_ops = {
1258 .iomap_begin = gfs2_iomap_begin,
1259 .iomap_end = gfs2_iomap_end,
1263 * gfs2_block_map - Map one or more blocks of an inode to a disk block
1265 * @lblock: The logical block number
1266 * @bh_map: The bh to be mapped
 * @create: True if it's ok to alloc blocks to satisfy the request
1269 * The size of the requested mapping is defined in bh_map->b_size.
1271 * Clears buffer_mapped(bh_map) and leaves bh_map->b_size unchanged
1272 * when @lblock is not mapped. Sets buffer_mapped(bh_map) and
1273 * bh_map->b_size to indicate the size of the mapping when @lblock and
1274 * successive blocks are mapped, up to the requested size.
1276 * Sets buffer_boundary() if a read of metadata will be required
1277 * before the next block can be mapped. Sets buffer_new() if new
1278 * blocks were allocated.
1283 int gfs2_block_map(struct inode *inode, sector_t lblock,
1284 struct buffer_head *bh_map, int create)
1286 struct gfs2_inode *ip = GFS2_I(inode);
1287 loff_t pos = (loff_t)lblock << inode->i_blkbits;
1288 loff_t length = bh_map->b_size;
1289 struct metapath mp = { .mp_aheight = 1, };
1290 struct iomap iomap = { };
1291 int flags = create ? IOMAP_WRITE : 0;
1294 clear_buffer_mapped(bh_map);
1295 clear_buffer_new(bh_map);
1296 clear_buffer_boundary(bh_map);
1297 trace_gfs2_bmap(ip, bh_map, lblock, create, 1);
1299 ret = gfs2_iomap_get(inode, pos, length, flags, &iomap, &mp);
1300 if (create && !ret && iomap.type == IOMAP_HOLE)
1301 ret = gfs2_iomap_alloc(inode, &iomap, &mp);
1302 release_metapath(&mp);
1306 if (iomap.length > bh_map->b_size) {
1307 iomap.length = bh_map->b_size;
1308 iomap.flags &= ~IOMAP_F_GFS2_BOUNDARY;
1310 if (iomap.addr != IOMAP_NULL_ADDR)
1311 map_bh(bh_map, inode->i_sb, iomap.addr >> inode->i_blkbits);
1312 bh_map->b_size = iomap.length;
1313 if (iomap.flags & IOMAP_F_GFS2_BOUNDARY)
1314 set_buffer_boundary(bh_map);
1315 if (iomap.flags & IOMAP_F_NEW)
1316 set_buffer_new(bh_map);
1319 trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
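/*
 * Minimal usage sketch (illustration only, not called anywhere in GFS2;
 * the helper name is hypothetical): map a single logical block of
 * @inode without allocating, using the stack buffer_head pattern that
 * gfs2_extent_map() and gfs2_write_alloc_required() also rely on.
 */
static inline int example_lookup_block(struct inode *inode, sector_t lblock,
				       u64 *dblock)
{
	struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
	int ret;

	bh.b_size = i_blocksize(inode);	/* ask for at most one block */
	ret = gfs2_block_map(inode, lblock, &bh, 0);
	if (ret)
		return ret;
	if (!buffer_mapped(&bh))
		return -ENODATA;	/* hole: nothing mapped here */
	*dblock = bh.b_blocknr;
	return 0;
}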
1324 * Deprecated: do not use in new code
1326 int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen)
1328 struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
1336 bh.b_size = BIT(inode->i_blkbits + (create ? 0 : 5));
1337 ret = gfs2_block_map(inode, lblock, &bh, create);
1338 *extlen = bh.b_size >> inode->i_blkbits;
1339 *dblock = bh.b_blocknr;
1340 if (buffer_new(&bh))
1348 * NOTE: Never call gfs2_block_zero_range with an open transaction because it
1349 * uses iomap write to perform its actions, which begin their own transactions
1350 * (iomap_begin, page_prepare, etc.)
1352 static int gfs2_block_zero_range(struct inode *inode, loff_t from,
1353 unsigned int length)
1355 BUG_ON(current->journal_info);
1356 return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops);
1359 #define GFS2_JTRUNC_REVOKES 8192
1362 * gfs2_journaled_truncate - Wrapper for truncate_pagecache for jdata files
1363 * @inode: The inode being truncated
1364 * @oldsize: The original (larger) size
1365 * @newsize: The new smaller size
1367 * With jdata files, we have to journal a revoke for each block which is
1368 * truncated. As a result, we need to split this into separate transactions
1369 * if the number of pages being truncated gets too large.
1372 static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize)
1374 struct gfs2_sbd *sdp = GFS2_SB(inode);
1375 u64 max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
1379 while (oldsize != newsize) {
1380 struct gfs2_trans *tr;
1383 chunk = oldsize - newsize;
1384 if (chunk > max_chunk)
1387 offs = oldsize & ~PAGE_MASK;
1388 if (offs && chunk > PAGE_SIZE)
1389 chunk = offs + ((chunk - offs) & PAGE_MASK);
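		/*
		 * Example (illustrative): with 4k pages, oldsize = 10000
		 * (offs = 1808) and chunk = 9000 gives chunk = 1808 + 4096
		 * = 5904, so the truncate_pagecache() call below lands on
		 * the page-aligned size 4096.
		 */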
1391 truncate_pagecache(inode, oldsize - chunk);
1394 tr = current->journal_info;
1395 if (!test_bit(TR_TOUCHED, &tr->tr_flags))
1398 gfs2_trans_end(sdp);
1399 error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
1407 static int trunc_start(struct inode *inode, u64 newsize)
1409 struct gfs2_inode *ip = GFS2_I(inode);
1410 struct gfs2_sbd *sdp = GFS2_SB(inode);
1411 struct buffer_head *dibh = NULL;
1412 int journaled = gfs2_is_jdata(ip);
1413 u64 oldsize = inode->i_size;
1416 if (!gfs2_is_stuffed(ip)) {
1417 unsigned int blocksize = i_blocksize(inode);
1418 unsigned int offs = newsize & (blocksize - 1);
1420 error = gfs2_block_zero_range(inode, newsize,
1427 error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES);
1429 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
1433 error = gfs2_meta_inode_buffer(ip, &dibh);
1437 gfs2_trans_add_meta(ip->i_gl, dibh);
1439 if (gfs2_is_stuffed(ip))
1440 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
1442 ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
1444 i_size_write(inode, newsize);
1445 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
1446 gfs2_dinode_out(ip, dibh->b_data);
1449 error = gfs2_journaled_truncate(inode, oldsize, newsize);
1451 truncate_pagecache(inode, newsize);
1455 if (current->journal_info)
1456 gfs2_trans_end(sdp);
1460 int gfs2_iomap_get_alloc(struct inode *inode, loff_t pos, loff_t length,
1461 struct iomap *iomap)
1463 struct metapath mp = { .mp_aheight = 1, };
1466 ret = gfs2_iomap_get(inode, pos, length, IOMAP_WRITE, iomap, &mp);
1467 if (!ret && iomap->type == IOMAP_HOLE)
1468 ret = gfs2_iomap_alloc(inode, iomap, &mp);
1469 release_metapath(&mp);
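/*
 * Minimal usage sketch (illustration only; the helper name is
 * hypothetical and the locking and transaction setup that real callers
 * perform first is omitted): allocate and map one block at byte
 * position @pos and return its disk block number.
 */
static inline int example_get_alloc_one(struct inode *inode, loff_t pos,
					u64 *dblock)
{
	struct iomap iomap = { };
	int ret;

	ret = gfs2_iomap_get_alloc(inode, pos, i_blocksize(inode), &iomap);
	if (ret)
		return ret;
	*dblock = iomap.addr >> inode->i_blkbits;
	return 0;
}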
1474 * sweep_bh_for_rgrps - find an rgrp in a meta buffer and free blocks therein
 * @rd_gh: holder of resource group glock
1477 * @bh: buffer head to sweep
1478 * @start: starting point in bh
1479 * @end: end point in bh
1480 * @meta: true if bh points to metadata (rather than data)
1481 * @btotal: place to keep count of total blocks freed
1483 * We sweep a metadata buffer (provided by the metapath) for blocks we need to
1484 * free, and free them all. However, we do it one rgrp at a time. If this
1485 * block has references to multiple rgrps, we break it into individual
1486 * transactions. This allows other processes to use the rgrps while we're
1487 * focused on a single one, for better concurrency / performance.
1488 * At every transaction boundary, we rewrite the inode into the journal.
1489 * That way the bitmaps are kept consistent with the inode and we can recover
1490 * if we're interrupted by power-outages.
1492 * Returns: 0, or return code if an error occurred.
1493 * *btotal has the total number of blocks freed
1495 static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh,
1496 struct buffer_head *bh, __be64 *start, __be64 *end,
1497 bool meta, u32 *btotal)
1499 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1500 struct gfs2_rgrpd *rgd;
1501 struct gfs2_trans *tr;
1503 int blks_outside_rgrp;
1504 u64 bn, bstart, isize_blks;
1505 s64 blen; /* needs to be s64 or gfs2_add_inode_blocks breaks */
1507 bool buf_in_tr = false; /* buffer was added to transaction */
1511 if (gfs2_holder_initialized(rd_gh)) {
1512 rgd = gfs2_glock2rgrp(rd_gh->gh_gl);
1513 gfs2_assert_withdraw(sdp,
1514 gfs2_glock_is_locked_by_me(rd_gh->gh_gl));
1516 blks_outside_rgrp = 0;
1520 for (p = start; p < end; p++) {
1523 bn = be64_to_cpu(*p);
1526 if (!rgrp_contains_block(rgd, bn)) {
1527 blks_outside_rgrp++;
1531 rgd = gfs2_blk2rgrpd(sdp, bn, true);
1532 if (unlikely(!rgd)) {
1536 ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
1541 /* Must be done with the rgrp glock held: */
1542 if (gfs2_rs_active(&ip->i_res) &&
1543 rgd == ip->i_res.rs_rbm.rgd)
1544 gfs2_rs_deltree(&ip->i_res);
1547 /* The size of our transactions will be unknown until we
1548 actually process all the metadata blocks that relate to
1549 the rgrp. So we estimate. We know it can't be more than
1550 the dinode's i_blocks and we don't want to exceed the
1551 journal flush threshold, sd_log_thresh2. */
1552 if (current->journal_info == NULL) {
1553 unsigned int jblocks_rqsted, revokes;
1555 jblocks_rqsted = rgd->rd_length + RES_DINODE +
1557 isize_blks = gfs2_get_inode_blocks(&ip->i_inode);
1558 if (isize_blks > atomic_read(&sdp->sd_log_thresh2))
1560 atomic_read(&sdp->sd_log_thresh2);
1562 jblocks_rqsted += isize_blks;
1563 revokes = jblocks_rqsted;
1565 revokes += end - start;
1566 else if (ip->i_depth)
1567 revokes += sdp->sd_inptrs;
1568 ret = gfs2_trans_begin(sdp, jblocks_rqsted, revokes);
1571 down_write(&ip->i_rw_mutex);
1573 /* check if we will exceed the transaction blocks requested */
1574 tr = current->journal_info;
1575 if (tr->tr_num_buf_new + RES_STATFS +
1576 RES_QUOTA >= atomic_read(&sdp->sd_log_thresh2)) {
1577 /* We set blks_outside_rgrp to ensure the loop will
1578 be repeated for the same rgrp, but with a new
1580 blks_outside_rgrp++;
1581 /* This next part is tricky. If the buffer was added
1582 to the transaction, we've already set some block
1583 pointers to 0, so we better follow through and free
1584 them, or we will introduce corruption (so break).
1585 This may be impossible, or at least rare, but I
1586 decided to cover the case regardless.
1588 If the buffer was not added to the transaction
1589 (this call), doing so would exceed our transaction
1590 size, so we need to end the transaction and start a
1591 new one (so goto). */
1598 gfs2_trans_add_meta(ip->i_gl, bh);
1601 if (bstart + blen == bn) {
1606 __gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
1608 gfs2_add_inode_blocks(&ip->i_inode, -blen);
1614 __gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
1616 gfs2_add_inode_blocks(&ip->i_inode, -blen);
1619 if (!ret && blks_outside_rgrp) { /* If buffer still has non-zero blocks
1620 outside the rgrp we just processed,
1621 do it all over again. */
1622 if (current->journal_info) {
1623 struct buffer_head *dibh;
1625 ret = gfs2_meta_inode_buffer(ip, &dibh);
1629 /* Every transaction boundary, we rewrite the dinode
1630 to keep its di_blocks current in case of failure. */
1631 ip->i_inode.i_mtime = ip->i_inode.i_ctime =
1632 current_time(&ip->i_inode);
1633 gfs2_trans_add_meta(ip->i_gl, dibh);
1634 gfs2_dinode_out(ip, dibh->b_data);
1636 up_write(&ip->i_rw_mutex);
1637 gfs2_trans_end(sdp);
1640 gfs2_glock_dq_uninit(rd_gh);
1648 static bool mp_eq_to_hgt(struct metapath *mp, __u16 *list, unsigned int h)
1650 if (memcmp(mp->mp_list, list, h * sizeof(mp->mp_list[0])))
1656 * find_nonnull_ptr - find a non-null pointer given a metapath and height
1657 * @mp: starting metapath
1658 * @h: desired height to search
1660 * Assumes the metapath is valid (with buffers) out to height h.
1661 * Returns: true if a non-null pointer was found in the metapath buffer
1662 * false if all remaining pointers are NULL in the buffer
1664 static bool find_nonnull_ptr(struct gfs2_sbd *sdp, struct metapath *mp,
1666 __u16 *end_list, unsigned int end_aligned)
1668 struct buffer_head *bh = mp->mp_bh[h];
1669 __be64 *first, *ptr, *end;
1671 first = metaptr1(h, mp);
1672 ptr = first + mp->mp_list[h];
1673 end = (__be64 *)(bh->b_data + bh->b_size);
1674 if (end_list && mp_eq_to_hgt(mp, end_list, h)) {
1675 bool keep_end = h < end_aligned;
1676 end = first + end_list[h] + keep_end;
1680 if (*ptr) { /* if we have a non-null pointer */
1681 mp->mp_list[h] = ptr - first;
1683 if (h < GFS2_MAX_META_HEIGHT)
1692 enum dealloc_states {
1693 DEALLOC_MP_FULL = 0, /* Strip a metapath with all buffers read in */
1694 DEALLOC_MP_LOWER = 1, /* lower the metapath strip height */
1695 DEALLOC_FILL_MP = 2, /* Fill in the metapath to the given height. */
1696 DEALLOC_DONE = 3, /* process complete */
1700 metapointer_range(struct metapath *mp, int height,
1701 __u16 *start_list, unsigned int start_aligned,
1702 __u16 *end_list, unsigned int end_aligned,
1703 __be64 **start, __be64 **end)
1705 struct buffer_head *bh = mp->mp_bh[height];
1708 first = metaptr1(height, mp);
1710 if (mp_eq_to_hgt(mp, start_list, height)) {
1711 bool keep_start = height < start_aligned;
1712 *start = first + start_list[height] + keep_start;
1714 *end = (__be64 *)(bh->b_data + bh->b_size);
1715 if (end_list && mp_eq_to_hgt(mp, end_list, height)) {
1716 bool keep_end = height < end_aligned;
1717 *end = first + end_list[height] + keep_end;
1721 static inline bool walk_done(struct gfs2_sbd *sdp,
1722 struct metapath *mp, int height,
1723 __u16 *end_list, unsigned int end_aligned)
1728 bool keep_end = height < end_aligned;
1729 if (!mp_eq_to_hgt(mp, end_list, height))
1731 end = end_list[height] + keep_end;
1733 end = (height > 0) ? sdp->sd_inptrs : sdp->sd_diptrs;
1734 return mp->mp_list[height] >= end;
1738 * punch_hole - deallocate blocks in a file
1739 * @ip: inode to truncate
1740 * @offset: the start of the hole
1741 * @length: the size of the hole (or 0 for truncate)
1743 * Punch a hole into a file or truncate a file at a given position. This
1744 * function operates in whole blocks (@offset and @length are rounded
1745 * accordingly); partially filled blocks must be cleared otherwise.
1747 * This function works from the bottom up, and from the right to the left. In
1748 * other words, it strips off the highest layer (data) before stripping any of
1749 * the metadata. Doing it this way is best in case the operation is interrupted
1750 * by power failure, etc. The dinode is rewritten in every transaction to
1751 * guarantee integrity.
1753 static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
1755 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1756 u64 maxsize = sdp->sd_heightsize[ip->i_height];
1757 struct metapath mp = {};
1758 struct buffer_head *dibh, *bh;
1759 struct gfs2_holder rd_gh;
1760 unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
1761 unsigned int bsize = 1 << bsize_shift;
1762 u64 lblock = (offset + bsize - 1) >> bsize_shift;
1763 __u16 start_list[GFS2_MAX_META_HEIGHT];
1764 __u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL;
1765 unsigned int start_aligned, end_aligned;
1766 unsigned int strip_h = ip->i_height - 1;
1769 int mp_h; /* metapath buffers are read in to this height */
1771 __be64 *start, *end;
1773 if (offset + bsize - 1 >= maxsize) {
 * The starting point lies beyond the allocated metadata;
 * there are no blocks to deallocate.
1782 * The start position of the hole is defined by lblock, start_list, and
1783 * start_aligned. The end position of the hole is defined by lend,
1784 * end_list, and end_aligned.
1786 * start_aligned and end_aligned define down to which height the start
1787 * and end positions are aligned to the metadata tree (i.e., the
1788 * position is a multiple of the metadata granularity at the height
1789 * above). This determines at which heights additional meta pointers
 * need to be preserved for the remaining data.
1794 u64 end_offset = offset + length;
1798 * Clip the end at the maximum file size for the given height:
1799 * that's how far the metadata goes; files bigger than that
1800 * will have additional layers of indirection.
1802 if (end_offset > maxsize)
1803 end_offset = maxsize;
1804 lend = end_offset >> bsize_shift;
1809 find_metapath(sdp, lend, &mp, ip->i_height);
1810 end_list = __end_list;
1811 memcpy(end_list, mp.mp_list, sizeof(mp.mp_list));
1813 for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
1820 find_metapath(sdp, lblock, &mp, ip->i_height);
1821 memcpy(start_list, mp.mp_list, sizeof(start_list));
1823 for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
1824 if (start_list[mp_h])
1827 start_aligned = mp_h;
1829 ret = gfs2_meta_inode_buffer(ip, &dibh);
1834 ret = lookup_metapath(ip, &mp);
1838 /* issue read-ahead on metadata */
1839 for (mp_h = 0; mp_h < mp.mp_aheight - 1; mp_h++) {
1840 metapointer_range(&mp, mp_h, start_list, start_aligned,
1841 end_list, end_aligned, &start, &end);
1842 gfs2_metapath_ra(ip->i_gl, start, end);
1845 if (mp.mp_aheight == ip->i_height)
1846 state = DEALLOC_MP_FULL; /* We have a complete metapath */
1848 state = DEALLOC_FILL_MP; /* deal with partial metapath */
1850 ret = gfs2_rindex_update(sdp);
1854 ret = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
1857 gfs2_holder_mark_uninitialized(&rd_gh);
1861 while (state != DEALLOC_DONE) {
1863 /* Truncate a full metapath at the given strip height.
1864 * Note that strip_h == mp_h in order to be in this state. */
1865 case DEALLOC_MP_FULL:
1866 bh = mp.mp_bh[mp_h];
1867 gfs2_assert_withdraw(sdp, bh);
1868 if (gfs2_assert_withdraw(sdp,
1869 prev_bnr != bh->b_blocknr)) {
fs_emerg(sdp, "inode %llu, block:%llu, i_h:%u, "
	 "s_h:%u, mp_h:%u\n",
1872 (unsigned long long)ip->i_no_addr,
1873 prev_bnr, ip->i_height, strip_h, mp_h);
1875 prev_bnr = bh->b_blocknr;
1877 if (gfs2_metatype_check(sdp, bh,
1878 (mp_h ? GFS2_METATYPE_IN :
1879 GFS2_METATYPE_DI))) {
1885 * Below, passing end_aligned as 0 gives us the
1886 * metapointer range excluding the end point: the end
1887 * point is the first metapath we must not deallocate!
1890 metapointer_range(&mp, mp_h, start_list, start_aligned,
1891 end_list, 0 /* end_aligned */,
1893 ret = sweep_bh_for_rgrps(ip, &rd_gh, mp.mp_bh[mp_h],
1895 mp_h != ip->i_height - 1,
1898 /* If we hit an error or just swept dinode buffer,
1901 state = DEALLOC_DONE;
1904 state = DEALLOC_MP_LOWER;
1907 /* lower the metapath strip height */
1908 case DEALLOC_MP_LOWER:
1909 /* We're done with the current buffer, so release it,
1910 unless it's the dinode buffer. Then back up to the
1911 previous pointer. */
1913 brelse(mp.mp_bh[mp_h]);
1914 mp.mp_bh[mp_h] = NULL;
1916 /* If we can't get any lower in height, we've stripped
1917 off all we can. Next step is to back up and start
1918 stripping the previous level of metadata. */
1921 memcpy(mp.mp_list, start_list, sizeof(start_list));
1923 state = DEALLOC_FILL_MP;
1926 mp.mp_list[mp_h] = 0;
1927 mp_h--; /* search one metadata height down */
1929 if (walk_done(sdp, &mp, mp_h, end_list, end_aligned))
1931 /* Here we've found a part of the metapath that is not
1932 * allocated. We need to search at that height for the
1933 * next non-null pointer. */
1934 if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned)) {
1935 state = DEALLOC_FILL_MP;
1938 /* No more non-null pointers at this height. Back up
1939 to the previous height and try again. */
1940 break; /* loop around in the same state */
1942 /* Fill the metapath with buffers to the given height. */
1943 case DEALLOC_FILL_MP:
1944 /* Fill the buffers out to the current height. */
1945 ret = fillup_metapath(ip, &mp, mp_h);
1949 /* On the first pass, issue read-ahead on metadata. */
1950 if (mp.mp_aheight > 1 && strip_h == ip->i_height - 1) {
1951 unsigned int height = mp.mp_aheight - 1;
1953 /* No read-ahead for data blocks. */
1954 if (mp.mp_aheight - 1 == strip_h)
1957 for (; height >= mp.mp_aheight - ret; height--) {
1958 metapointer_range(&mp, height,
1959 start_list, start_aligned,
1960 end_list, end_aligned,
1962 gfs2_metapath_ra(ip->i_gl, start, end);
1966 /* If buffers found for the entire strip height */
1967 if (mp.mp_aheight - 1 == strip_h) {
1968 state = DEALLOC_MP_FULL;
1971 if (mp.mp_aheight < ip->i_height) /* We have a partial height */
1972 mp_h = mp.mp_aheight - 1;
1974 /* If we find a non-null block pointer, crawl a bit
1975 higher up in the metapath and try again, otherwise
1976 we need to look lower for a new starting point. */
1977 if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned))
1980 state = DEALLOC_MP_LOWER;
1986 if (current->journal_info == NULL) {
1987 ret = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS +
1991 down_write(&ip->i_rw_mutex);
1993 gfs2_statfs_change(sdp, 0, +btotal, 0);
1994 gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
1996 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
1997 gfs2_trans_add_meta(ip->i_gl, dibh);
1998 gfs2_dinode_out(ip, dibh->b_data);
1999 up_write(&ip->i_rw_mutex);
2000 gfs2_trans_end(sdp);
2004 if (gfs2_holder_initialized(&rd_gh))
2005 gfs2_glock_dq_uninit(&rd_gh);
2006 if (current->journal_info) {
2007 up_write(&ip->i_rw_mutex);
2008 gfs2_trans_end(sdp);
2011 gfs2_quota_unhold(ip);
2013 release_metapath(&mp);
2017 static int trunc_end(struct gfs2_inode *ip)
2019 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2020 struct buffer_head *dibh;
2023 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
2027 down_write(&ip->i_rw_mutex);
2029 error = gfs2_meta_inode_buffer(ip, &dibh);
2033 if (!i_size_read(&ip->i_inode)) {
2035 ip->i_goal = ip->i_no_addr;
2036 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
2037 gfs2_ordered_del_inode(ip);
2039 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
2040 ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;
2042 gfs2_trans_add_meta(ip->i_gl, dibh);
2043 gfs2_dinode_out(ip, dibh->b_data);
2047 up_write(&ip->i_rw_mutex);
2048 gfs2_trans_end(sdp);
2053 * do_shrink - make a file smaller
2055 * @newsize: the size to make the file
 * Called with an exclusive lock on @inode. The @newsize must
2058 * be equal to or smaller than the current inode size.
2063 static int do_shrink(struct inode *inode, u64 newsize)
2065 struct gfs2_inode *ip = GFS2_I(inode);
2068 error = trunc_start(inode, newsize);
2071 if (gfs2_is_stuffed(ip))
2074 error = punch_hole(ip, newsize, 0);
2076 error = trunc_end(ip);
2081 void gfs2_trim_blocks(struct inode *inode)
2085 ret = do_shrink(inode, inode->i_size);
2090 * do_grow - Touch and update inode size
2092 * @size: The new size
2094 * This function updates the timestamps on the inode and
2095 * may also increase the size of the inode. This function
2096 * must not be called with @size any smaller than the current
2099 * Although it is not strictly required to unstuff files here,
2100 * earlier versions of GFS2 have a bug in the stuffed file reading
2101 * code which will result in a buffer overrun if the size is larger
2102 * than the max stuffed file size. In order to prevent this from
2103 * occurring, such files are unstuffed, but in other cases we can
2104 * just update the inode size directly.
2106 * Returns: 0 on success, or -ve on error
2109 static int do_grow(struct inode *inode, u64 size)
2111 struct gfs2_inode *ip = GFS2_I(inode);
2112 struct gfs2_sbd *sdp = GFS2_SB(inode);
2113 struct gfs2_alloc_parms ap = { .target = 1, };
2114 struct buffer_head *dibh;
2118 if (gfs2_is_stuffed(ip) && size > gfs2_max_stuffed_size(ip)) {
2119 error = gfs2_quota_lock_check(ip, &ap);
2123 error = gfs2_inplace_reserve(ip, &ap);
2125 goto do_grow_qunlock;
2129 error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT +
2131 gfs2_is_jdata(ip) ? RES_JDATA : 0) +
2132 (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ?
2135 goto do_grow_release;
2138 error = gfs2_unstuff_dinode(ip, NULL);
2143 error = gfs2_meta_inode_buffer(ip, &dibh);
2147 truncate_setsize(inode, size);
2148 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
2149 gfs2_trans_add_meta(ip->i_gl, dibh);
2150 gfs2_dinode_out(ip, dibh->b_data);
2154 gfs2_trans_end(sdp);
2157 gfs2_inplace_release(ip);
2159 gfs2_quota_unlock(ip);
2165 * gfs2_setattr_size - make a file a given size
2167 * @newsize: the size to make the file
2169 * The file size can grow, shrink, or stay the same size. This
2170 * is called holding i_rwsem and an exclusive glock on the inode
2176 int gfs2_setattr_size(struct inode *inode, u64 newsize)
2178 struct gfs2_inode *ip = GFS2_I(inode);
2181 BUG_ON(!S_ISREG(inode->i_mode));
2183 ret = inode_newsize_ok(inode, newsize);
2187 inode_dio_wait(inode);
2189 ret = gfs2_qa_get(ip);
2193 if (newsize >= inode->i_size) {
2194 ret = do_grow(inode, newsize);
2198 ret = do_shrink(inode, newsize);
2205 int gfs2_truncatei_resume(struct gfs2_inode *ip)
2208 error = punch_hole(ip, i_size_read(&ip->i_inode), 0);
2210 error = trunc_end(ip);
2214 int gfs2_file_dealloc(struct gfs2_inode *ip)
2216 return punch_hole(ip, 0, 0);
2220 * gfs2_free_journal_extents - Free cached journal bmap info
2225 void gfs2_free_journal_extents(struct gfs2_jdesc *jd)
2227 struct gfs2_journal_extent *jext;
2229 while(!list_empty(&jd->extent_list)) {
2230 jext = list_first_entry(&jd->extent_list, struct gfs2_journal_extent, list);
2231 list_del(&jext->list);
2237 * gfs2_add_jextent - Add or merge a new extent to extent cache
2238 * @jd: The journal descriptor
2239 * @lblock: The logical block at start of new extent
2240 * @dblock: The physical block at start of new extent
2241 * @blocks: Size of extent in fs blocks
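 *
 * If the new extent starts exactly where the last cached extent ends on
 * disk, the two are merged: e.g. (lblock 0, dblock 5000, 8 blocks)
 * followed by (lblock 8, dblock 5008, 8 blocks) leaves one 16-block
 * extent on the list (illustrative numbers).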
2243 * Returns: 0 on success or -ENOMEM
2246 static int gfs2_add_jextent(struct gfs2_jdesc *jd, u64 lblock, u64 dblock, u64 blocks)
2248 struct gfs2_journal_extent *jext;
2250 if (!list_empty(&jd->extent_list)) {
2251 jext = list_last_entry(&jd->extent_list, struct gfs2_journal_extent, list);
2252 if ((jext->dblock + jext->blocks) == dblock) {
2253 jext->blocks += blocks;
2258 jext = kzalloc(sizeof(struct gfs2_journal_extent), GFP_NOFS);
2261 jext->dblock = dblock;
2262 jext->lblock = lblock;
2263 jext->blocks = blocks;
2264 list_add_tail(&jext->list, &jd->extent_list);
2270 * gfs2_map_journal_extents - Cache journal bmap info
2271 * @sdp: The super block
2272 * @jd: The journal to map
2274 * Create a reusable "extent" mapping from all logical
2275 * blocks to all physical blocks for the given journal. This will save
2276 * us time when writing journal blocks. Most journals will have only one
 * extent that maps all their logical blocks. That's because mkfs.gfs2
2278 * arranges the journal blocks sequentially to maximize performance.
2279 * So the extent would map the first block for the entire file length.
2280 * However, gfs2_jadd can happen while file activity is happening, so
2281 * those journals may not be sequential. Less likely is the case where
2282 * the users created their own journals by mounting the metafs and
2283 * laying it out. But it's still possible. These journals might have
2286 * Returns: 0 on success, or error on failure
2289 int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
2293 struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
2294 struct buffer_head bh;
2295 unsigned int shift = sdp->sd_sb.sb_bsize_shift;
2300 start = ktime_get();
2301 lblock_stop = i_size_read(jd->jd_inode) >> shift;
2302 size = (lblock_stop - lblock) << shift;
2304 WARN_ON(!list_empty(&jd->extent_list));
2310 rc = gfs2_block_map(jd->jd_inode, lblock, &bh, 0);
2311 if (rc || !buffer_mapped(&bh))
2313 rc = gfs2_add_jextent(jd, lblock, bh.b_blocknr, bh.b_size >> shift);
2317 lblock += (bh.b_size >> ip->i_inode.i_blkbits);
2321 fs_info(sdp, "journal %d mapped with %u extents in %lldms\n", jd->jd_jid,
2322 jd->nr_extents, ktime_ms_delta(end, start));
2326 fs_warn(sdp, "error %d mapping journal %u at offset %llu (extent %u)\n",
2328 (unsigned long long)(i_size_read(jd->jd_inode) - size),
2330 fs_warn(sdp, "bmap=%d lblock=%llu block=%llu, state=0x%08lx, size=%llu\n",
2331 rc, (unsigned long long)lblock, (unsigned long long)bh.b_blocknr,
2332 bh.b_state, (unsigned long long)bh.b_size);
2333 gfs2_free_journal_extents(jd);
2338 * gfs2_write_alloc_required - figure out if a write will require an allocation
2339 * @ip: the file being written to
2340 * @offset: the offset to write to
2341 * @len: the number of bytes being written
2343 * Returns: 1 if an alloc is required, 0 otherwise
2346 int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
2349 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2350 struct buffer_head bh;
2352 u64 lblock, lblock_stop, size;
2358 if (gfs2_is_stuffed(ip)) {
2359 if (offset + len > gfs2_max_stuffed_size(ip))
2364 shift = sdp->sd_sb.sb_bsize_shift;
2365 BUG_ON(gfs2_is_dir(ip));
2366 end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
2367 lblock = offset >> shift;
2368 lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
2369 if (lblock_stop > end_of_file && ip != GFS2_I(sdp->sd_rindex))
2372 size = (lblock_stop - lblock) << shift;
2376 gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
2377 if (!buffer_mapped(&bh))
2380 lblock += (bh.b_size >> ip->i_inode.i_blkbits);
2386 static int stuffed_zero_range(struct inode *inode, loff_t offset, loff_t length)
2388 struct gfs2_inode *ip = GFS2_I(inode);
2389 struct buffer_head *dibh;
2392 if (offset >= inode->i_size)
2394 if (offset + length > inode->i_size)
2395 length = inode->i_size - offset;
2397 error = gfs2_meta_inode_buffer(ip, &dibh);
2400 gfs2_trans_add_meta(ip->i_gl, dibh);
2401 memset(dibh->b_data + sizeof(struct gfs2_dinode) + offset, 0,
2407 static int gfs2_journaled_truncate_range(struct inode *inode, loff_t offset,
2410 struct gfs2_sbd *sdp = GFS2_SB(inode);
2411 loff_t max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
2415 struct gfs2_trans *tr;
2420 if (chunk > max_chunk)
2423 offs = offset & ~PAGE_MASK;
2424 if (offs && chunk > PAGE_SIZE)
2425 chunk = offs + ((chunk - offs) & PAGE_MASK);
2427 truncate_pagecache_range(inode, offset, chunk);
2431 tr = current->journal_info;
2432 if (!test_bit(TR_TOUCHED, &tr->tr_flags))
2435 gfs2_trans_end(sdp);
2436 error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
2443 int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length)
2445 struct inode *inode = file_inode(file);
2446 struct gfs2_inode *ip = GFS2_I(inode);
2447 struct gfs2_sbd *sdp = GFS2_SB(inode);
2448 unsigned int blocksize = i_blocksize(inode);
2452 if (!gfs2_is_stuffed(ip)) {
2453 unsigned int start_off, end_len;
2455 start_off = offset & (blocksize - 1);
2456 end_len = (offset + length) & (blocksize - 1);
2458 unsigned int len = length;
2459 if (length > blocksize - start_off)
2460 len = blocksize - start_off;
2461 error = gfs2_block_zero_range(inode, offset, len);
2464 if (start_off + length < blocksize)
2468 error = gfs2_block_zero_range(inode,
2469 offset + length - end_len, end_len);
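	/*
	 * Worked example (illustrative numbers): with 4k blocks, punching
	 * offset 1000, length 10000 zeroes bytes 1000..4095 of the first
	 * block (len 3096) and bytes 8192..10999 of the last block
	 * (end_len 2808); the fully covered block in between is freed by
	 * punch_hole() further down.
	 */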
2475 start = round_down(offset, blocksize);
2476 end = round_up(offset + length, blocksize) - 1;
2477 error = filemap_write_and_wait_range(inode->i_mapping, start, end);
2481 if (gfs2_is_jdata(ip))
2482 error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA,
2483 GFS2_JTRUNC_REVOKES);
2485 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
2489 if (gfs2_is_stuffed(ip)) {
2490 error = stuffed_zero_range(inode, offset, length);
2495 if (gfs2_is_jdata(ip)) {
2496 BUG_ON(!current->journal_info);
2497 gfs2_journaled_truncate_range(inode, offset, length);
2499 truncate_pagecache_range(inode, offset, offset + length - 1);
2501 file_update_time(file);
2502 mark_inode_dirty(inode);
2504 if (current->journal_info)
2505 gfs2_trans_end(sdp);
2507 if (!gfs2_is_stuffed(ip))
2508 error = punch_hole(ip, offset, length);
2511 if (current->journal_info)
2512 gfs2_trans_end(sdp);
2516 static int gfs2_map_blocks(struct iomap_writepage_ctx *wpc, struct inode *inode,
2519 struct metapath mp = { .mp_aheight = 1, };
2522 if (WARN_ON_ONCE(gfs2_is_stuffed(GFS2_I(inode))))
2525 if (offset >= wpc->iomap.offset &&
2526 offset < wpc->iomap.offset + wpc->iomap.length)
2529 memset(&wpc->iomap, 0, sizeof(wpc->iomap));
2530 ret = gfs2_iomap_get(inode, offset, INT_MAX, 0, &wpc->iomap, &mp);
2531 release_metapath(&mp);
2535 const struct iomap_writeback_ops gfs2_writeback_ops = {
2536 .map_blocks = gfs2_map_blocks,