1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
4 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 #include <linux/slab.h>
10 #include <linux/spinlock.h>
11 #include <linux/completion.h>
12 #include <linux/buffer_head.h>
14 #include <linux/gfs2_ondisk.h>
15 #include <linux/prefetch.h>
16 #include <linux/blkdev.h>
17 #include <linux/rbtree.h>
18 #include <linux/random.h>
33 #include "trace_gfs2.h"
36 #define BFITNOENT ((u32)~0)
37 #define NO_BLOCK ((u64)~0)
39 #if BITS_PER_LONG == 32
40 #define LBITMASK (0x55555555UL)
41 #define LBITSKIP55 (0x55555555UL)
42 #define LBITSKIP00 (0x00000000UL)
44 #define LBITMASK (0x5555555555555555UL)
45 #define LBITSKIP55 (0x5555555555555555UL)
46 #define LBITSKIP00 (0x0000000000000000UL)
50 * These routines are used by the resource group routines (rgrp.c)
51 * to keep track of block allocation. Each block is represented by two
52 * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks.
55 * 1 = Used (not metadata)
56 * 2 = Unlinked (still in use) inode
65 static const char valid_change[16] = {
73 static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
74 const struct gfs2_inode *ip, bool nowrap);
78 * gfs2_setbit - Set a bit in the bitmaps
79 * @rbm: The position of the bit to set
80 * @do_clone: Also set the clone bitmap, if it exists
81 * @new_state: the new state of the block
85 static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
86 unsigned char new_state)
88 unsigned char *byte1, *byte2, *end, cur_state;
89 struct gfs2_bitmap *bi = rbm_bi(rbm);
90 unsigned int buflen = bi->bi_bytes;
91 const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
93 byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY);
94 end = bi->bi_bh->b_data + bi->bi_offset + buflen;
98 cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;
100 if (unlikely(!valid_change[new_state * 4 + cur_state])) {
101 struct gfs2_sbd *sdp = rbm->rgd->rd_sbd;
103 fs_warn(sdp, "buf_blk = 0x%x old_state=%d, new_state=%d\n",
104 rbm->offset, cur_state, new_state);
105 fs_warn(sdp, "rgrp=0x%llx bi_start=0x%x biblk: 0x%llx\n",
106 (unsigned long long)rbm->rgd->rd_addr, bi->bi_start,
107 (unsigned long long)bi->bi_bh->b_blocknr);
108 fs_warn(sdp, "bi_offset=0x%x bi_bytes=0x%x block=0x%llx\n",
109 bi->bi_offset, bi->bi_bytes,
110 (unsigned long long)gfs2_rbm_to_block(rbm));
112 gfs2_consist_rgrpd(rbm->rgd);
115 *byte1 ^= (cur_state ^ new_state) << bit;
117 if (do_clone && bi->bi_clone) {
118 byte2 = bi->bi_clone + bi->bi_offset + (rbm->offset / GFS2_NBBY);
119 cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
120 *byte2 ^= (cur_state ^ new_state) << bit;
125 * gfs2_testbit - test a bit in the bitmaps
126 * @rbm: The bit to test
127 * @use_clone: If true, test the clone bitmap, not the official bitmap.
129 * Some callers like gfs2_unaligned_extlen need to test the clone bitmaps,
130 * not the "real" bitmaps, to avoid allocating recently freed blocks.
132 * Returns: The two bit block state of the requested bit
135 static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm, bool use_clone)
137 struct gfs2_bitmap *bi = rbm_bi(rbm);
142 if (use_clone && bi->bi_clone)
143 buffer = bi->bi_clone;
145 buffer = bi->bi_bh->b_data;
146 buffer += bi->bi_offset;
147 byte = buffer + (rbm->offset / GFS2_NBBY);
148 bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
150 return (*byte >> bit) & GFS2_BIT_MASK;
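/*
 * Illustrative sketch (not part of the original source): how a two-bit
 * block state is unpacked from the bitmap, mirroring gfs2_setbit() and
 * gfs2_testbit() above.  GFS2_NBBY (4) block states fit in each byte,
 * each GFS2_BIT_SIZE (2) bits wide, and GFS2_BIT_MASK (0x3) selects one.
 */
static inline unsigned char example_block_state(const unsigned char *bitmap,
						unsigned int blk)
{
	unsigned int bit = (blk % GFS2_NBBY) * GFS2_BIT_SIZE;

	return (bitmap[blk / GFS2_NBBY] >> bit) & GFS2_BIT_MASK;
}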
155 * @ptr: Pointer to bitmap data
156 * @mask: Mask to use (normally 0x55555.... but adjusted for search start)
157 * @state: The state we are searching for
159 * We xor the bitmap data with a pattern which is the bitwise opposite
160 * of what we are looking for; this gives rise to a pattern of ones
161 * wherever there is a match. Since we have two bits per entry, we
162 * take this pattern, shift it down by one place and then AND it with
163 * the original. All the even bit positions (0,2,4, etc) then represent
164 * successful matches, so we mask with 0x55555..... to remove the unwanted
167 * This allows searching of a whole u64 at once (32 blocks) with a
168 * single test (on 64 bit arches).
171 static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
174 static const u64 search[] = {
175 [0] = 0xffffffffffffffffULL,
176 [1] = 0xaaaaaaaaaaaaaaaaULL,
177 [2] = 0x5555555555555555ULL,
178 [3] = 0x0000000000000000ULL,
180 tmp = le64_to_cpu(*ptr) ^ search[state];
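/*
 * Illustrative sketch (not part of the original source) of the search step
 * described in the comment above, operating on a host-order u64 rather
 * than the on-disk __le64: XOR with the inverse pattern turns matching
 * two-bit entries into 0b11, ANDing the word with itself shifted right by
 * one leaves a 1 in the even (low) bit of each match, and the 0x5555...
 * mask keeps only those even bit positions.
 */
static inline u64 example_bit_search(u64 word, u64 mask, u8 state)
{
	static const u64 inverse[4] = {
		0xffffffffffffffffULL,	/* looking for 0b00 (free) */
		0xaaaaaaaaaaaaaaaaULL,	/* looking for 0b01 (used data) */
		0x5555555555555555ULL,	/* looking for 0b10 (unlinked) */
		0x0000000000000000ULL,	/* looking for 0b11 (dinode) */
	};
	u64 tmp = word ^ inverse[state];

	tmp &= (tmp >> 1);
	tmp &= mask;	/* normally 0x5555...; adjusted for the search start */
	return tmp;	/* non-zero iff a matching entry was found */
}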
187 * rs_cmp - multi-block reservation range compare
188 * @blk: absolute file system block number of the new reservation
189 * @len: number of blocks in the new reservation
190 * @rs: existing reservation to compare against
192 * returns: 1 if the block range is beyond the reach of the reservation
193 * -1 if the block range is before the start of the reservation
194 * 0 if the block range overlaps with the reservation
196 static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
198 u64 startblk = gfs2_rbm_to_block(&rs->rs_rbm);
200 if (blk >= startblk + rs->rs_free)
202 if (blk + len - 1 < startblk)
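/*
 * Illustrative sketch (not part of the original source) of the interval
 * test rs_cmp() implements: a candidate range [blk, blk + len - 1] is
 * compared against a reservation starting at startblk and spanning
 * nblocks blocks, yielding 1 (past the reservation), -1 (before it) or
 * 0 (overlapping), which is what the rb-tree walks below rely on.
 */
static inline int example_range_cmp(u64 blk, u32 len, u64 startblk,
				    u32 nblocks)
{
	if (blk >= startblk + nblocks)
		return 1;
	if (blk + len - 1 < startblk)
		return -1;
	return 0;
}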
208 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
209 * a block in a given allocation state.
210 * @buf: the buffer that holds the bitmaps
211 * @len: the length (in bytes) of the buffer
212 * @goal: start search at this block's bit-pair (within @buffer)
213 * @state: GFS2_BLKST_XXX the state of the block we're looking for.
215 * Scope of @goal and returned block number is only within this bitmap buffer,
216 * not entire rgrp or filesystem. @buf will be offset from the actual
217 * beginning of a bitmap block buffer, skipping any header structures, but
218 * headers are always a multiple of 64 bits long so that the buffer is
219 * always aligned to a 64 bit boundary.
221 * The size of the buffer is in bytes, but it is assumed that it is
222 * always ok to read a complete multiple of 64 bits at the end
223 * of the block in case the end is not aligned to a natural boundary.
225 * Return: the block number (bitmap buffer scope) that was found
228 static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
231 u32 spoint = (goal << 1) & ((8*sizeof(u64)) - 1);
232 const __le64 *ptr = ((__le64 *)buf) + (goal >> 5);
233 const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64)));
235 u64 mask = 0x5555555555555555ULL;
238 /* Mask off bits we don't care about at the start of the search */
240 tmp = gfs2_bit_search(ptr, mask, state);
242 while(tmp == 0 && ptr < end) {
243 tmp = gfs2_bit_search(ptr, 0x5555555555555555ULL, state);
246 /* Mask off any bits which are more than len bytes from the start */
247 if (ptr == end && (len & (sizeof(u64) - 1)))
248 tmp &= (((u64)~0) >> (64 - 8*(len & (sizeof(u64) - 1))));
249 /* Didn't find anything, so return */
254 bit /= 2; /* two bits per entry in the bitmap */
255 return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
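/*
 * Illustrative sketch (not part of the original source): where a goal
 * block's two-bit entry lands when the buffer is scanned one u64 at a
 * time, matching the ptr/spoint computation at the top of gfs2_bitfit().
 * Each 64-bit word covers 32 block entries.
 */
static inline void example_goal_position(u32 goal, u32 *word_index,
					 u32 *start_bit)
{
	*word_index = goal >> 5;	/* goal / 32 entries per u64 */
	*start_bit = (goal << 1) & 63;	/* bit offset within that word */
}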
259 * gfs2_rbm_from_block - Set the rbm based upon rgd and block number
260 * @rbm: The rbm with rgd already set correctly
261 * @block: The block number (filesystem relative)
263 * This sets the bi and offset members of an rbm based on a
264 * resource group and a filesystem relative block number. The
265 * resource group must be set in the rbm on entry, the bi and
266 * offset members will be set by this function.
268 * Returns: 0 on success, or an error code
271 static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
273 if (!rgrp_contains_block(rbm->rgd, block))
276 rbm->offset = block - rbm->rgd->rd_data0;
277 /* Check if the block is within the first bitmap block */
278 if (rbm->offset < rbm_bi(rbm)->bi_blocks)
281 /* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
282 rbm->offset += (sizeof(struct gfs2_rgrp) -
283 sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
284 rbm->bii = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
285 rbm->offset -= rbm->bii * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
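/*
 * Illustrative sketch (not part of the original source) of the bii/offset
 * split performed above: the first bitmap block carries a larger
 * struct gfs2_rgrp header while later blocks only carry a
 * struct gfs2_meta_header, so offsets beyond the first bitmap are biased
 * by the size difference (expressed in block entries) before being split
 * into a bitmap index and an offset within that bitmap.
 */
static inline void example_split_offset(u32 offset, u32 header_bias,
					u32 blocks_per_bitmap,
					u32 *bii, u32 *bit_offset)
{
	offset += header_bias;			  /* only for offsets past bitmap 0 */
	*bii = offset / blocks_per_bitmap;	  /* which bitmap block */
	*bit_offset = offset % blocks_per_bitmap; /* entry within that bitmap */
}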
290 * gfs2_rbm_incr - increment an rbm structure
291 * @rbm: The rbm with rgd already set correctly
293 * This function takes an existing rbm structure and increments it to the next
294 * viable block offset.
296 * Returns: If incrementing the offset would cause the rbm to go past the
297 * end of the rgrp, true is returned, otherwise false.
301 static bool gfs2_rbm_incr(struct gfs2_rbm *rbm)
303 if (rbm->offset + 1 < rbm_bi(rbm)->bi_blocks) { /* in the same bitmap */
307 if (rbm->bii == rbm->rgd->rd_length - 1) /* at the last bitmap */
316 * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
317 * @rbm: Position to search (value/result)
318 * @n_unaligned: Number of unaligned blocks to check
319 * @len: Decremented for each block found (terminate on zero)
321 * Returns: true if a non-free block is encountered
324 static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
329 for (n = 0; n < n_unaligned; n++) {
330 res = gfs2_testbit(rbm, true);
331 if (res != GFS2_BLKST_FREE)
336 if (gfs2_rbm_incr(rbm))
344 * gfs2_free_extlen - Return extent length of free blocks
345 * @rrbm: Starting position
346 * @len: Max length to check
348 * Starting at the block specified by the rbm, see how many free blocks
349 * there are, not reading more than len blocks ahead. This can be done
350 * using memchr_inv when the blocks are byte aligned, but has to be done
351 * on a block by block basis in case of unaligned blocks. Also this
352 * function can cope with bitmap boundaries (although it must stop on
353 * a resource group boundary)
355 * Returns: Number of free blocks in the extent
358 static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
360 struct gfs2_rbm rbm = *rrbm;
361 u32 n_unaligned = rbm.offset & 3;
365 u8 *ptr, *start, *end;
367 struct gfs2_bitmap *bi;
370 gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
373 n_unaligned = len & 3;
374 /* Start is now byte aligned */
377 start = bi->bi_bh->b_data;
379 start = bi->bi_clone;
380 start += bi->bi_offset;
381 end = start + bi->bi_bytes;
382 BUG_ON(rbm.offset & 3);
383 start += (rbm.offset / GFS2_NBBY);
384 bytes = min_t(u32, len / GFS2_NBBY, (end - start));
385 ptr = memchr_inv(start, 0, bytes);
386 chunk_size = ((ptr == NULL) ? bytes : (ptr - start));
387 chunk_size *= GFS2_NBBY;
388 BUG_ON(len < chunk_size);
390 block = gfs2_rbm_to_block(&rbm);
391 if (gfs2_rbm_from_block(&rbm, block + chunk_size)) {
399 n_unaligned = len & 3;
402 /* Deal with any bits left over at the end */
404 gfs2_unaligned_extlen(&rbm, n_unaligned, &len);
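/*
 * Illustrative sketch (not part of the original source): measuring a free
 * run in a byte-aligned slice of bitmap, as the memchr_inv() loop above
 * does.  Four free blocks (state 0b00) make a zero byte, so memchr_inv()
 * finds the first byte containing any non-free state, and the free extent
 * length is the number of leading zero bytes times GFS2_NBBY.
 */
static inline u32 example_free_run(const u8 *start, u32 bytes)
{
	const u8 *p = memchr_inv(start, 0, bytes);

	return (p ? (u32)(p - start) : bytes) * GFS2_NBBY;
}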
410 * gfs2_bitcount - count the number of bits in a certain state
411 * @rgd: the resource group descriptor
412 * @buffer: the buffer that holds the bitmaps
413 * @buflen: the length (in bytes) of the buffer
414 * @state: the state of the block we're looking for
416 * Returns: The number of bits
419 static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
420 unsigned int buflen, u8 state)
422 const u8 *byte = buffer;
423 const u8 *end = buffer + buflen;
424 const u8 state1 = state << 2;
425 const u8 state2 = state << 4;
426 const u8 state3 = state << 6;
429 for (; byte < end; byte++) {
430 if (((*byte) & 0x03) == state)
432 if (((*byte) & 0x0C) == state1)
434 if (((*byte) & 0x30) == state2)
436 if (((*byte) & 0xC0) == state3)
444 * gfs2_rgrp_verify - Verify that a resource group is consistent
449 void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
451 struct gfs2_sbd *sdp = rgd->rd_sbd;
452 struct gfs2_bitmap *bi = NULL;
453 u32 length = rgd->rd_length;
457 memset(count, 0, 4 * sizeof(u32));
459 /* Count # blocks in each of 4 possible allocation states */
460 for (buf = 0; buf < length; buf++) {
461 bi = rgd->rd_bits + buf;
462 for (x = 0; x < 4; x++)
463 count[x] += gfs2_bitcount(rgd,
469 if (count[0] != rgd->rd_free) {
470 if (gfs2_consist_rgrpd(rgd))
471 fs_err(sdp, "free data mismatch: %u != %u\n",
472 count[0], rgd->rd_free);
476 tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
477 if (count[1] != tmp) {
478 if (gfs2_consist_rgrpd(rgd))
479 fs_err(sdp, "used data mismatch: %u != %u\n",
484 if (count[2] + count[3] != rgd->rd_dinodes) {
485 if (gfs2_consist_rgrpd(rgd))
486 fs_err(sdp, "used metadata mismatch: %u != %u\n",
487 count[2] + count[3], rgd->rd_dinodes);
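/*
 * Illustrative sketch (not part of the original source) of the accounting
 * identity the checks above enforce: every block in the resource group is
 * free, used data, or one of the two dinode states, so the four per-state
 * counts must be consistent with rd_free, rd_data and rd_dinodes.
 */
static inline bool example_rgrp_counts_ok(const u32 count[4], u32 rd_data,
					  u32 rd_free, u32 rd_dinodes)
{
	return count[0] == rd_free &&
	       count[1] == rd_data - rd_free - rd_dinodes &&
	       count[2] + count[3] == rd_dinodes;
}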
493 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
494 * @sdp: The GFS2 superblock
495 * @blk: The data block number
496 * @exact: True if this needs to be an exact match
498 * The @exact argument should be set to true by most callers. The exception
499 * is when we need to match blocks which are not represented by the rgrp
500 * bitmap, but which are part of the rgrp (i.e. padding blocks) which are
501 * there for alignment purposes. Another way of looking at it is that @exact
502 * matches only valid data/metadata blocks, but with @exact false, it will
503 * match any block within the extent of the rgrp.
505 * Returns: The resource group, or NULL if not found
508 struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact)
510 struct rb_node *n, *next;
511 struct gfs2_rgrpd *cur;
513 spin_lock(&sdp->sd_rindex_spin);
514 n = sdp->sd_rindex_tree.rb_node;
516 cur = rb_entry(n, struct gfs2_rgrpd, rd_node);
518 if (blk < cur->rd_addr)
520 else if (blk >= cur->rd_data0 + cur->rd_data)
523 spin_unlock(&sdp->sd_rindex_spin);
525 if (blk < cur->rd_addr)
527 if (blk >= cur->rd_data0 + cur->rd_data)
534 spin_unlock(&sdp->sd_rindex_spin);
540 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
541 * @sdp: The GFS2 superblock
543 * Returns: The first rgrp in the filesystem
546 struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
548 const struct rb_node *n;
549 struct gfs2_rgrpd *rgd;
551 spin_lock(&sdp->sd_rindex_spin);
552 n = rb_first(&sdp->sd_rindex_tree);
553 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
554 spin_unlock(&sdp->sd_rindex_spin);
560 * gfs2_rgrpd_get_next - get the next RG
561 * @rgd: the resource group descriptor
563 * Returns: The next rgrp
566 struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
568 struct gfs2_sbd *sdp = rgd->rd_sbd;
569 const struct rb_node *n;
571 spin_lock(&sdp->sd_rindex_spin);
572 n = rb_next(&rgd->rd_node);
574 n = rb_first(&sdp->sd_rindex_tree);
576 if (unlikely(&rgd->rd_node == n)) {
577 spin_unlock(&sdp->sd_rindex_spin);
580 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
581 spin_unlock(&sdp->sd_rindex_spin);
585 void check_and_update_goal(struct gfs2_inode *ip)
587 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
588 if (!ip->i_goal || gfs2_blk2rgrpd(sdp, ip->i_goal, 1) == NULL)
589 ip->i_goal = ip->i_no_addr;
592 void gfs2_free_clones(struct gfs2_rgrpd *rgd)
596 for (x = 0; x < rgd->rd_length; x++) {
597 struct gfs2_bitmap *bi = rgd->rd_bits + x;
604 * gfs2_rsqa_alloc - make sure we have a reservation assigned to the inode
605 * plus a quota allocation data structure, if necessary
606 * @ip: the inode for this reservation
608 int gfs2_rsqa_alloc(struct gfs2_inode *ip)
610 return gfs2_qa_alloc(ip);
613 static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs,
614 const char *fs_id_buf)
616 struct gfs2_inode *ip = container_of(rs, struct gfs2_inode, i_res);
618 gfs2_print_dbg(seq, "%s B: n:%llu s:%llu b:%u f:%u\n", fs_id_buf,
619 (unsigned long long)ip->i_no_addr,
620 (unsigned long long)gfs2_rbm_to_block(&rs->rs_rbm),
621 rs->rs_rbm.offset, rs->rs_free);
625 * __rs_deltree - remove a multi-block reservation from the rgd tree
626 * @rs: The reservation to remove
629 static void __rs_deltree(struct gfs2_blkreserv *rs)
631 struct gfs2_rgrpd *rgd;
633 if (!gfs2_rs_active(rs))
636 rgd = rs->rs_rbm.rgd;
637 trace_gfs2_rs(rs, TRACE_RS_TREEDEL);
638 rb_erase(&rs->rs_node, &rgd->rd_rstree);
639 RB_CLEAR_NODE(&rs->rs_node);
642 u64 last_block = gfs2_rbm_to_block(&rs->rs_rbm) +
644 struct gfs2_rbm last_rbm = { .rgd = rs->rs_rbm.rgd, };
645 struct gfs2_bitmap *start, *last;
647 /* return reserved blocks to the rgrp */
648 BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
649 rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
650 /* The rgrp extent failure point is likely not to increase;
651 it will only do so if the freed blocks are somehow
652 contiguous with a span of free blocks that follows. Still,
653 it will force the number to be recalculated later. */
654 rgd->rd_extfail_pt += rs->rs_free;
656 if (gfs2_rbm_from_block(&last_rbm, last_block))
658 start = rbm_bi(&rs->rs_rbm);
659 last = rbm_bi(&last_rbm);
661 clear_bit(GBF_FULL, &start->bi_flags);
662 while (start++ != last);
667 * gfs2_rs_deltree - remove a multi-block reservation from the rgd tree
668 * @rs: The reservation to remove
671 void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
673 struct gfs2_rgrpd *rgd;
675 rgd = rs->rs_rbm.rgd;
677 spin_lock(&rgd->rd_rsspin);
680 spin_unlock(&rgd->rd_rsspin);
685 * gfs2_rsqa_delete - delete a multi-block reservation and quota allocation
686 * @ip: The inode for this reservation
687 * @wcount: The inode's write count, or NULL
690 void gfs2_rsqa_delete(struct gfs2_inode *ip, atomic_t *wcount)
692 down_write(&ip->i_rw_mutex);
693 if ((wcount == NULL) || (atomic_read(wcount) <= 1))
694 gfs2_rs_deltree(&ip->i_res);
695 up_write(&ip->i_rw_mutex);
696 gfs2_qa_delete(ip, wcount);
700 * return_all_reservations - return all reserved blocks back to the rgrp.
701 * @rgd: the rgrp that needs its space back
703 * We previously reserved a bunch of blocks for allocation. Now we need to
704 * give them back. This leaves the reservation structures intact, but removes
705 * all of their corresponding "no-fly zones".
707 static void return_all_reservations(struct gfs2_rgrpd *rgd)
710 struct gfs2_blkreserv *rs;
712 spin_lock(&rgd->rd_rsspin);
713 while ((n = rb_first(&rgd->rd_rstree))) {
714 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
717 spin_unlock(&rgd->rd_rsspin);
720 void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
723 struct gfs2_rgrpd *rgd;
724 struct gfs2_glock *gl;
726 while ((n = rb_first(&sdp->sd_rindex_tree))) {
727 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
730 rb_erase(n, &sdp->sd_rindex_tree);
733 glock_clear_object(gl, rgd);
734 gfs2_rgrp_brelse(rgd);
738 gfs2_free_clones(rgd);
739 return_all_reservations(rgd);
742 kmem_cache_free(gfs2_rgrpd_cachep, rgd);
746 static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
748 struct gfs2_sbd *sdp = rgd->rd_sbd;
750 fs_info(sdp, "ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
751 fs_info(sdp, "ri_length = %u\n", rgd->rd_length);
752 fs_info(sdp, "ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
753 fs_info(sdp, "ri_data = %u\n", rgd->rd_data);
754 fs_info(sdp, "ri_bitbytes = %u\n", rgd->rd_bitbytes);
758 * compute_bitstructs - Compute the bitmap sizes
759 * @rgd: The resource group descriptor
761 * Calculates bitmap descriptors, one for each block that contains bitmap data
766 static int compute_bitstructs(struct gfs2_rgrpd *rgd)
768 struct gfs2_sbd *sdp = rgd->rd_sbd;
769 struct gfs2_bitmap *bi;
770 u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
771 u32 bytes_left, bytes;
777 rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
781 bytes_left = rgd->rd_bitbytes;
783 for (x = 0; x < length; x++) {
784 bi = rgd->rd_bits + x;
787 /* small rgrp; bitmap stored completely in header block */
790 bi->bi_offset = sizeof(struct gfs2_rgrp);
792 bi->bi_bytes = bytes;
793 bi->bi_blocks = bytes * GFS2_NBBY;
796 bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
797 bi->bi_offset = sizeof(struct gfs2_rgrp);
799 bi->bi_bytes = bytes;
800 bi->bi_blocks = bytes * GFS2_NBBY;
802 } else if (x + 1 == length) {
804 bi->bi_offset = sizeof(struct gfs2_meta_header);
805 bi->bi_start = rgd->rd_bitbytes - bytes_left;
806 bi->bi_bytes = bytes;
807 bi->bi_blocks = bytes * GFS2_NBBY;
810 bytes = sdp->sd_sb.sb_bsize -
811 sizeof(struct gfs2_meta_header);
812 bi->bi_offset = sizeof(struct gfs2_meta_header);
813 bi->bi_start = rgd->rd_bitbytes - bytes_left;
814 bi->bi_bytes = bytes;
815 bi->bi_blocks = bytes * GFS2_NBBY;
822 gfs2_consist_rgrpd(rgd);
825 bi = rgd->rd_bits + (length - 1);
826 if ((bi->bi_start + bi->bi_bytes) * GFS2_NBBY != rgd->rd_data) {
827 if (gfs2_consist_rgrpd(rgd)) {
828 gfs2_rindex_print(rgd);
829 fs_err(sdp, "start=%u len=%u offset=%u\n",
830 bi->bi_start, bi->bi_bytes, bi->bi_offset);
839 * gfs2_ri_total - Total up the file system space, according to the rindex.
840 * @sdp: the filesystem
843 u64 gfs2_ri_total(struct gfs2_sbd *sdp)
846 struct inode *inode = sdp->sd_rindex;
847 struct gfs2_inode *ip = GFS2_I(inode);
848 char buf[sizeof(struct gfs2_rindex)];
851 for (rgrps = 0;; rgrps++) {
852 loff_t pos = rgrps * sizeof(struct gfs2_rindex);
854 if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode))
856 error = gfs2_internal_read(ip, buf, &pos,
857 sizeof(struct gfs2_rindex));
858 if (error != sizeof(struct gfs2_rindex))
860 total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
865 static int rgd_insert(struct gfs2_rgrpd *rgd)
867 struct gfs2_sbd *sdp = rgd->rd_sbd;
868 struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;
870 /* Figure out where to put new node */
872 struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
876 if (rgd->rd_addr < cur->rd_addr)
877 newn = &((*newn)->rb_left);
878 else if (rgd->rd_addr > cur->rd_addr)
879 newn = &((*newn)->rb_right);
884 rb_link_node(&rgd->rd_node, parent, newn);
885 rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
891 * read_rindex_entry - Pull in a new resource index entry from the disk
892 * @ip: Pointer to the rindex inode
894 * Returns: 0 on success, > 0 on EOF, error code otherwise
897 static int read_rindex_entry(struct gfs2_inode *ip)
899 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
900 const unsigned bsize = sdp->sd_sb.sb_bsize;
901 loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
902 struct gfs2_rindex buf;
904 struct gfs2_rgrpd *rgd;
906 if (pos >= i_size_read(&ip->i_inode))
909 error = gfs2_internal_read(ip, (char *)&buf, &pos,
910 sizeof(struct gfs2_rindex));
912 if (error != sizeof(struct gfs2_rindex))
913 return (error == 0) ? 1 : error;
915 rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
921 rgd->rd_addr = be64_to_cpu(buf.ri_addr);
922 rgd->rd_length = be32_to_cpu(buf.ri_length);
923 rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
924 rgd->rd_data = be32_to_cpu(buf.ri_data);
925 rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
926 spin_lock_init(&rgd->rd_rsspin);
928 error = gfs2_glock_get(sdp, rgd->rd_addr,
929 &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
933 error = compute_bitstructs(rgd);
937 rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
938 rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
939 if (rgd->rd_data > sdp->sd_max_rg_data)
940 sdp->sd_max_rg_data = rgd->rd_data;
941 spin_lock(&sdp->sd_rindex_spin);
942 error = rgd_insert(rgd);
943 spin_unlock(&sdp->sd_rindex_spin);
945 glock_set_object(rgd->rd_gl, rgd);
946 rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
947 rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr +
948 rgd->rd_length) * bsize) - 1;
952 error = 0; /* someone else read in the rgrp; free it and ignore it */
954 gfs2_glock_put(rgd->rd_gl);
959 kmem_cache_free(gfs2_rgrpd_cachep, rgd);
964 * set_rgrp_preferences - Run all the rgrps, selecting some we prefer to use
965 * @sdp: the GFS2 superblock
967 * The purpose of this function is to select a subset of the resource groups
968 * and mark them as PREFERRED. We do it in such a way that each node prefers
969 * to use a unique set of rgrps to minimize glock contention.
971 static void set_rgrp_preferences(struct gfs2_sbd *sdp)
973 struct gfs2_rgrpd *rgd, *first;
976 /* Skip an initial number of rgrps, based on this node's journal ID.
977 That should start each node out on its own set. */
978 rgd = gfs2_rgrpd_get_first(sdp);
979 for (i = 0; i < sdp->sd_lockstruct.ls_jid; i++)
980 rgd = gfs2_rgrpd_get_next(rgd);
984 rgd->rd_flags |= GFS2_RDF_PREFERRED;
985 for (i = 0; i < sdp->sd_journals; i++) {
986 rgd = gfs2_rgrpd_get_next(rgd);
987 if (!rgd || rgd == first)
990 } while (rgd && rgd != first);
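/*
 * Illustrative sketch (not part of the original source) of the striping
 * pattern the loop above produces, assuming the number of resource groups
 * is a multiple of the journal count so that wrap-around does not shift
 * the stride: node jid prefers rgrp indices jid, jid + journals,
 * jid + 2 * journals, and so on.
 */
static inline bool example_rgrp_is_preferred(unsigned int rgrp_index,
					     unsigned int jid,
					     unsigned int journals)
{
	return (rgrp_index % journals) == (jid % journals);
}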
994 * gfs2_ri_update - Pull in a new resource index from the disk
995 * @ip: pointer to the rindex inode
997 * Returns: 0 on successful update, error code otherwise
1000 static int gfs2_ri_update(struct gfs2_inode *ip)
1002 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1006 error = read_rindex_entry(ip);
1007 } while (error == 0);
1012 if (RB_EMPTY_ROOT(&sdp->sd_rindex_tree)) {
1013 fs_err(sdp, "no resource groups found in the file system.\n");
1016 set_rgrp_preferences(sdp);
1018 sdp->sd_rindex_uptodate = 1;
1023 * gfs2_rindex_update - Update the rindex if required
1024 * @sdp: The GFS2 superblock
1026 * We grab a lock on the rindex inode to make sure that it doesn't
1027 * change whilst we are performing an operation. We keep this lock
1028 * for quite long periods of time compared to other locks. This
1029 * doesn't matter, since it is shared and it is very, very rarely
1030 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
1032 * This makes sure that we're using the latest copy of the resource index
1033 * special file, which might have been updated if someone expanded the
1034 * filesystem (via gfs2_grow utility), which adds new resource groups.
1036 * Returns: 0 on success, error code otherwise
1039 int gfs2_rindex_update(struct gfs2_sbd *sdp)
1041 struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
1042 struct gfs2_glock *gl = ip->i_gl;
1043 struct gfs2_holder ri_gh;
1045 int unlock_required = 0;
1047 /* Read new copy from disk if we don't have the latest */
1048 if (!sdp->sd_rindex_uptodate) {
1049 if (!gfs2_glock_is_locked_by_me(gl)) {
1050 error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
1053 unlock_required = 1;
1055 if (!sdp->sd_rindex_uptodate)
1056 error = gfs2_ri_update(ip);
1057 if (unlock_required)
1058 gfs2_glock_dq_uninit(&ri_gh);
1064 static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
1066 const struct gfs2_rgrp *str = buf;
1069 rg_flags = be32_to_cpu(str->rg_flags);
1070 rg_flags &= ~GFS2_RDF_MASK;
1071 rgd->rd_flags &= GFS2_RDF_MASK;
1072 rgd->rd_flags |= rg_flags;
1073 rgd->rd_free = be32_to_cpu(str->rg_free);
1074 rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
1075 rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
1076 /* rd_data0, rd_data and rd_bitbytes already set from rindex */
1079 static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
1081 const struct gfs2_rgrp *str = buf;
1083 rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
1084 rgl->rl_flags = str->rg_flags;
1085 rgl->rl_free = str->rg_free;
1086 rgl->rl_dinodes = str->rg_dinodes;
1087 rgl->rl_igeneration = str->rg_igeneration;
1091 static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
1093 struct gfs2_rgrpd *next = gfs2_rgrpd_get_next(rgd);
1094 struct gfs2_rgrp *str = buf;
1097 str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
1098 str->rg_free = cpu_to_be32(rgd->rd_free);
1099 str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
1102 else if (next->rd_addr > rgd->rd_addr)
1103 str->rg_skip = cpu_to_be32(next->rd_addr - rgd->rd_addr);
1104 str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
1105 str->rg_data0 = cpu_to_be64(rgd->rd_data0);
1106 str->rg_data = cpu_to_be32(rgd->rd_data);
1107 str->rg_bitbytes = cpu_to_be32(rgd->rd_bitbytes);
1109 crc = gfs2_disk_hash(buf, sizeof(struct gfs2_rgrp));
1110 str->rg_crc = cpu_to_be32(crc);
1112 memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
1113 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, buf);
1116 static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
1118 struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
1119 struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;
1120 struct gfs2_sbd *sdp = rgd->rd_sbd;
1123 if (rgl->rl_flags != str->rg_flags) {
1124 fs_warn(sdp, "GFS2: rgd: %llu lvb flag mismatch %u/%u",
1125 (unsigned long long)rgd->rd_addr,
1126 be32_to_cpu(rgl->rl_flags), be32_to_cpu(str->rg_flags));
1129 if (rgl->rl_free != str->rg_free) {
1130 fs_warn(sdp, "GFS2: rgd: %llu lvb free mismatch %u/%u",
1131 (unsigned long long)rgd->rd_addr,
1132 be32_to_cpu(rgl->rl_free), be32_to_cpu(str->rg_free));
1135 if (rgl->rl_dinodes != str->rg_dinodes) {
1136 fs_warn(sdp, "GFS2: rgd: %llu lvb dinode mismatch %u/%u",
1137 (unsigned long long)rgd->rd_addr,
1138 be32_to_cpu(rgl->rl_dinodes),
1139 be32_to_cpu(str->rg_dinodes));
1142 if (rgl->rl_igeneration != str->rg_igeneration) {
1143 fs_warn(sdp, "GFS2: rgd: %llu lvb igen mismatch %llu/%llu",
1144 (unsigned long long)rgd->rd_addr,
1145 (unsigned long long)be64_to_cpu(rgl->rl_igeneration),
1146 (unsigned long long)be64_to_cpu(str->rg_igeneration));
1152 static u32 count_unlinked(struct gfs2_rgrpd *rgd)
1154 struct gfs2_bitmap *bi;
1155 const u32 length = rgd->rd_length;
1156 const u8 *buffer = NULL;
1157 u32 i, goal, count = 0;
1159 for (i = 0, bi = rgd->rd_bits; i < length; i++, bi++) {
1161 buffer = bi->bi_bh->b_data + bi->bi_offset;
1162 WARN_ON(!buffer_uptodate(bi->bi_bh));
1163 while (goal < bi->bi_blocks) {
1164 goal = gfs2_bitfit(buffer, bi->bi_bytes, goal,
1165 GFS2_BLKST_UNLINKED);
1166 if (goal == BFITNOENT)
1178 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
1179 * @rgd: the struct gfs2_rgrpd describing the RG to read in
1181 * Read in all of a Resource Group's header and bitmap blocks.
1182 * Caller must eventually call gfs2_rgrp_brelse() to free the bitmaps.
1187 static int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
1189 struct gfs2_sbd *sdp = rgd->rd_sbd;
1190 struct gfs2_glock *gl = rgd->rd_gl;
1191 unsigned int length = rgd->rd_length;
1192 struct gfs2_bitmap *bi;
1196 if (rgd->rd_bits[0].bi_bh != NULL)
1199 for (x = 0; x < length; x++) {
1200 bi = rgd->rd_bits + x;
1201 error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, 0, &bi->bi_bh);
1206 for (y = length; y--;) {
1207 bi = rgd->rd_bits + y;
1208 error = gfs2_meta_wait(sdp, bi->bi_bh);
1211 if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
1212 GFS2_METATYPE_RG)) {
1218 if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
1219 for (x = 0; x < length; x++)
1220 clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
1221 gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
1222 rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
1223 rgd->rd_free_clone = rgd->rd_free;
1224 /* max out the rgrp allocation failure point */
1225 rgd->rd_extfail_pt = rgd->rd_free;
1227 if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
1228 rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
1229 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
1230 rgd->rd_bits[0].bi_bh->b_data);
1232 else if (sdp->sd_args.ar_rgrplvb) {
1233 if (!gfs2_rgrp_lvb_valid(rgd)){
1234 gfs2_consist_rgrpd(rgd);
1238 if (rgd->rd_rgl->rl_unlinked == 0)
1239 rgd->rd_flags &= ~GFS2_RDF_CHECK;
1245 bi = rgd->rd_bits + x;
1248 gfs2_assert_warn(sdp, !bi->bi_clone);
1254 static int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
1258 if (rgd->rd_flags & GFS2_RDF_UPTODATE)
1261 if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
1262 return gfs2_rgrp_bh_get(rgd);
1264 rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
1265 rl_flags &= ~GFS2_RDF_MASK;
1266 rgd->rd_flags &= GFS2_RDF_MASK;
1267 rgd->rd_flags |= (rl_flags | GFS2_RDF_CHECK);
1268 if (rgd->rd_rgl->rl_unlinked == 0)
1269 rgd->rd_flags &= ~GFS2_RDF_CHECK;
1270 rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
1271 rgd->rd_free_clone = rgd->rd_free;
1272 rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
1273 rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
1277 int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
1279 struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
1280 struct gfs2_sbd *sdp = rgd->rd_sbd;
1282 if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb)
1284 return gfs2_rgrp_bh_get(rgd);
1288 * gfs2_rgrp_brelse - Release RG bitmaps read in with gfs2_rgrp_bh_get()
1289 * @rgd: The resource group
1293 void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd)
1295 int x, length = rgd->rd_length;
1297 for (x = 0; x < length; x++) {
1298 struct gfs2_bitmap *bi = rgd->rd_bits + x;
1308 * gfs2_rgrp_go_unlock - Unlock a rgrp glock
1309 * @gh: The glock holder for the resource group
1313 void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
1315 struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
1316 int demote_requested = test_bit(GLF_DEMOTE, &gh->gh_gl->gl_flags) |
1317 test_bit(GLF_PENDING_DEMOTE, &gh->gh_gl->gl_flags);
1319 if (rgd && demote_requested)
1320 gfs2_rgrp_brelse(rgd);
1323 int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
1324 struct buffer_head *bh,
1325 const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)
1327 struct super_block *sb = sdp->sd_vfs;
1330 sector_t nr_blks = 0;
1336 for (x = 0; x < bi->bi_bytes; x++) {
1337 const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
1338 clone += bi->bi_offset;
1341 const u8 *orig = bh->b_data + bi->bi_offset + x;
1342 diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1));
1344 diff = ~(*clone | (*clone >> 1));
1349 blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
1353 goto start_new_extent;
1354 if ((start + nr_blks) != blk) {
1355 if (nr_blks >= minlen) {
1356 rv = sb_issue_discard(sb,
1373 if (nr_blks >= minlen) {
1374 rv = sb_issue_discard(sb, start, nr_blks, GFP_NOFS, 0);
1380 *ptrimmed = trimmed;
1384 if (sdp->sd_args.ar_discard)
1385 fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem\n", rv);
1386 sdp->sd_args.ar_discard = 0;
1391 * gfs2_fitrim - Generate discard requests for unused bits of the filesystem
1392 * @filp: Any file on the filesystem
1393 * @argp: Pointer to the arguments (also used to pass result)
1395 * Returns: 0 on success, otherwise error code
1398 int gfs2_fitrim(struct file *filp, void __user *argp)
1400 struct inode *inode = file_inode(filp);
1401 struct gfs2_sbd *sdp = GFS2_SB(inode);
1402 struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
1403 struct buffer_head *bh;
1404 struct gfs2_rgrpd *rgd;
1405 struct gfs2_rgrpd *rgd_end;
1406 struct gfs2_holder gh;
1407 struct fstrim_range r;
1411 u64 start, end, minlen;
1413 unsigned bs_shift = sdp->sd_sb.sb_bsize_shift;
1415 if (!capable(CAP_SYS_ADMIN))
1418 if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
1421 if (!blk_queue_discard(q))
1424 if (copy_from_user(&r, argp, sizeof(r)))
1427 ret = gfs2_rindex_update(sdp);
1431 start = r.start >> bs_shift;
1432 end = start + (r.len >> bs_shift);
1433 minlen = max_t(u64, r.minlen, sdp->sd_sb.sb_bsize);
1434 minlen = max_t(u64, minlen,
1435 q->limits.discard_granularity) >> bs_shift;
1437 if (end <= start || minlen > sdp->sd_max_rg_data)
1440 rgd = gfs2_blk2rgrpd(sdp, start, 0);
1441 rgd_end = gfs2_blk2rgrpd(sdp, end, 0);
1443 if ((gfs2_rgrpd_get_first(sdp) == gfs2_rgrpd_get_next(rgd_end))
1444 && (start > rgd_end->rd_data0 + rgd_end->rd_data))
1445 return -EINVAL; /* start is beyond the end of the fs */
1449 ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
1453 if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) {
1454 /* Trim each bitmap in the rgrp */
1455 for (x = 0; x < rgd->rd_length; x++) {
1456 struct gfs2_bitmap *bi = rgd->rd_bits + x;
1457 ret = gfs2_rgrp_send_discards(sdp,
1458 rgd->rd_data0, NULL, bi, minlen,
1461 gfs2_glock_dq_uninit(&gh);
1467 /* Mark rgrp as having been trimmed */
1468 ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
1470 bh = rgd->rd_bits[0].bi_bh;
1471 rgd->rd_flags |= GFS2_RGF_TRIMMED;
1472 gfs2_trans_add_meta(rgd->rd_gl, bh);
1473 gfs2_rgrp_out(rgd, bh->b_data);
1474 gfs2_trans_end(sdp);
1477 gfs2_glock_dq_uninit(&gh);
1482 rgd = gfs2_rgrpd_get_next(rgd);
1486 r.len = trimmed << bs_shift;
1487 if (copy_to_user(argp, &r, sizeof(r)))
1494 * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree
1495 * @ip: the inode structure
1498 static void rs_insert(struct gfs2_inode *ip)
1500 struct rb_node **newn, *parent = NULL;
1502 struct gfs2_blkreserv *rs = &ip->i_res;
1503 struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd;
1504 u64 fsblock = gfs2_rbm_to_block(&rs->rs_rbm);
1506 BUG_ON(gfs2_rs_active(rs));
1508 spin_lock(&rgd->rd_rsspin);
1509 newn = &rgd->rd_rstree.rb_node;
1511 struct gfs2_blkreserv *cur =
1512 rb_entry(*newn, struct gfs2_blkreserv, rs_node);
1515 rc = rs_cmp(fsblock, rs->rs_free, cur);
1517 newn = &((*newn)->rb_right);
1519 newn = &((*newn)->rb_left);
1521 spin_unlock(&rgd->rd_rsspin);
1527 rb_link_node(&rs->rs_node, parent, newn);
1528 rb_insert_color(&rs->rs_node, &rgd->rd_rstree);
1530 /* Do our rgrp accounting for the reservation */
1531 rgd->rd_reserved += rs->rs_free; /* blocks reserved */
1532 spin_unlock(&rgd->rd_rsspin);
1533 trace_gfs2_rs(rs, TRACE_RS_INSERT);
1537 * rgd_free - return the number of free blocks we can allocate.
1538 * @rgd: the resource group
1540 * This function returns the number of free blocks for an rgrp.
1541 * That's the clone-free blocks (blocks that are free, not including those
1542 * still being used for unlinked files that haven't been deleted.)
1544 * It also subtracts any blocks reserved by someone else, but does not
1545 * include free blocks that are still part of our current reservation,
1546 * because obviously we can (and will) allocate them.
1548 static inline u32 rgd_free(struct gfs2_rgrpd *rgd, struct gfs2_blkreserv *rs)
1550 u32 tot_reserved, tot_free;
1552 if (WARN_ON_ONCE(rgd->rd_reserved < rs->rs_free))
1554 tot_reserved = rgd->rd_reserved - rs->rs_free;
1556 if (rgd->rd_free_clone < tot_reserved)
1559 tot_free = rgd->rd_free_clone - tot_reserved;
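/*
 * Illustrative sketch (not part of the original source) of the idea behind
 * rgd_free() above: blocks reserved by other inodes are the rgrp's total
 * reserved count minus our own reservation, and what we may allocate is
 * the clone-free count less that amount (with the inconsistent case,
 * where the reserved count exceeds the clone-free count, treated as if
 * nothing else were reserved).
 */
static inline u32 example_rgd_free(u32 free_clone, u32 total_reserved,
				   u32 our_reserved)
{
	u32 others = total_reserved - our_reserved;

	if (free_clone < others)	/* inconsistent accounting: ignore */
		others = 0;
	return free_clone - others;
}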
1565 * rg_mblk_search - find a group of multiple free blocks to form a reservation
1566 * @rgd: the resource group descriptor
1567 * @ip: pointer to the inode for which we're reserving blocks
1568 * @ap: the allocation parameters
1572 static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
1573 const struct gfs2_alloc_parms *ap)
1575 struct gfs2_rbm rbm = { .rgd = rgd, };
1577 struct gfs2_blkreserv *rs = &ip->i_res;
1579 u32 free_blocks = rgd_free(rgd, rs);
1581 struct inode *inode = &ip->i_inode;
1583 if (S_ISDIR(inode->i_mode))
1586 extlen = max_t(u32, atomic_read(&ip->i_sizehint), ap->target);
1587 extlen = clamp(extlen, (u32)RGRP_RSRV_MINBLKS, free_blocks);
1589 if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
1592 /* Find bitmap block that contains bits for goal block */
1593 if (rgrp_contains_block(rgd, ip->i_goal))
1596 goal = rgd->rd_last_alloc + rgd->rd_data0;
1598 if (WARN_ON(gfs2_rbm_from_block(&rbm, goal)))
1601 ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &extlen, ip, true);
1604 rs->rs_free = extlen;
1607 if (goal == rgd->rd_last_alloc + rgd->rd_data0)
1608 rgd->rd_last_alloc = 0;
1613 * gfs2_next_unreserved_block - Return next block that is not reserved
1614 * @rgd: The resource group
1615 * @block: The starting block
1616 * @length: The required length
1617 * @ip: Ignore any reservations for this inode
1619 * If the block does not appear in any reservation, then return the
1620 * block number unchanged. If it does appear in the reservation, then
1621 * keep looking through the tree of reservations in order to find the
1622 * first block number which is not reserved.
1625 static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
1627 const struct gfs2_inode *ip)
1629 struct gfs2_blkreserv *rs;
1633 spin_lock(&rgd->rd_rsspin);
1634 n = rgd->rd_rstree.rb_node;
1636 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
1637 rc = rs_cmp(block, length, rs);
1647 while ((rs_cmp(block, length, rs) == 0) && (&ip->i_res != rs)) {
1648 block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free;
1652 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
1656 spin_unlock(&rgd->rd_rsspin);
1661 * gfs2_reservation_check_and_update - Check for reservations during block alloc
1662 * @rbm: The current position in the resource group
1663 * @ip: The inode for which we are searching for blocks
1664 * @minext: The minimum extent length
1665 * @maxext: A pointer to the maximum extent structure
1667 * This checks the current position in the rgrp to see whether there is
1668 * a reservation covering this block. If not then this function is a
1669 * no-op. If there is, then the position is moved to the end of the
1670 * contiguous reservation(s) so that we are pointing at the first
1671 * non-reserved block.
1673 * Returns: 0 if no reservation, 1 if @rbm has changed, otherwise an error
1676 static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
1677 const struct gfs2_inode *ip,
1679 struct gfs2_extent *maxext)
1681 u64 block = gfs2_rbm_to_block(rbm);
1687 * If we have a minimum extent length, then skip over any extent
1688 * which is less than the min extent length in size.
1691 extlen = gfs2_free_extlen(rbm, minext);
1692 if (extlen <= maxext->len)
1697 * Check the extent which has been found against the reservations
1698 * and skip if parts of it are already reserved
1700 nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip);
1701 if (nblock == block) {
1702 if (!minext || extlen >= minext)
1705 if (extlen > maxext->len) {
1706 maxext->len = extlen;
1710 nblock = block + extlen;
1712 ret = gfs2_rbm_from_block(rbm, nblock);
1719 * gfs2_rbm_find - Look for blocks of a particular state
1720 * @rbm: Value/result starting position and final position
1721 * @state: The state which we want to find
1722 * @minext: Pointer to the requested extent length (NULL for a single block)
1723 * This is updated to be the actual reservation size.
1724 * @ip: If set, check for reservations
1725 * @nowrap: Stop looking at the end of the rgrp, rather than wrapping
1726 * around until we've reached the starting point.
1729 * - If looking for free blocks, we set GBF_FULL on each bitmap which
1730 * has no free blocks in it.
1731 * - If looking for free blocks, we set rd_extfail_pt on each rgrp which
1732 * has come up short on a free block search.
1734 * Returns: 0 on success, -ENOSPC if there is no block of the requested state
1737 static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
1738 const struct gfs2_inode *ip, bool nowrap)
1740 bool scan_from_start = rbm->bii == 0 && rbm->offset == 0;
1741 struct buffer_head *bh;
1745 bool wrapped = false;
1747 struct gfs2_bitmap *bi;
1748 struct gfs2_extent maxext = { .rbm.rgd = rbm->rgd, };
1751 * Determine the last bitmap to search. If we're not starting at the
1752 * beginning of a bitmap, we need to search that bitmap twice to scan
1753 * the entire resource group.
1755 last_bii = rbm->bii - (rbm->offset == 0);
1759 if ((ip == NULL || !gfs2_rs_active(&ip->i_res)) &&
1760 test_bit(GBF_FULL, &bi->bi_flags) &&
1761 (state == GFS2_BLKST_FREE))
1765 buffer = bh->b_data + bi->bi_offset;
1766 WARN_ON(!buffer_uptodate(bh));
1767 if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
1768 buffer = bi->bi_clone + bi->bi_offset;
1769 offset = gfs2_bitfit(buffer, bi->bi_bytes, rbm->offset, state);
1770 if (offset == BFITNOENT) {
1771 if (state == GFS2_BLKST_FREE && rbm->offset == 0)
1772 set_bit(GBF_FULL, &bi->bi_flags);
1775 rbm->offset = offset;
1779 ret = gfs2_reservation_check_and_update(rbm, ip,
1780 minext ? *minext : 0,
1786 if (ret == -E2BIG) {
1789 goto res_covered_end_of_rgrp;
1793 next_bitmap: /* Find next bitmap in the rgrp */
1796 if (rbm->bii == rbm->rgd->rd_length)
1798 res_covered_end_of_rgrp:
1799 if (rbm->bii == 0) {
1807 /* Have we scanned the entire resource group? */
1808 if (wrapped && rbm->bii > last_bii)
1812 if (minext == NULL || state != GFS2_BLKST_FREE)
1815 /* If the extent was too small, and it's smaller than the smallest
1816 to have failed before, remember for future reference that it's
1817 useless to search this rgrp again for this amount or more. */
1818 if (wrapped && (scan_from_start || rbm->bii > last_bii) &&
1819 *minext < rbm->rgd->rd_extfail_pt)
1820 rbm->rgd->rd_extfail_pt = *minext;
1822 /* If the maximum extent we found is big enough to fulfill the
1823 minimum requirements, use it anyway. */
1826 *minext = maxext.len;
1834 * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
1836 * @last_unlinked: block address of the last dinode we unlinked
1837 * @skip: block address we should explicitly not unlink
1839 * Queues iopen glock delete work for each unlinked, unused inode found,
1840 * and clears GFS2_RDF_CHECK once the whole resource group has been scanned.
1843 static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
1846 struct gfs2_sbd *sdp = rgd->rd_sbd;
1847 struct gfs2_glock *gl;
1848 struct gfs2_inode *ip;
1851 struct gfs2_rbm rbm = { .rgd = rgd, .bii = 0, .offset = 0 };
1854 down_write(&sdp->sd_log_flush_lock);
1855 error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, NULL, NULL,
1857 up_write(&sdp->sd_log_flush_lock);
1858 if (error == -ENOSPC)
1860 if (WARN_ON_ONCE(error))
1863 block = gfs2_rbm_to_block(&rbm);
1864 if (gfs2_rbm_from_block(&rbm, block + 1))
1866 if (*last_unlinked != NO_BLOCK && block <= *last_unlinked)
1870 *last_unlinked = block;
1872 error = gfs2_glock_get(sdp, block, &gfs2_iopen_glops, CREATE, &gl);
1876 /* If the inode is already in cache, we can ignore it here
1877 * because the existing inode disposal code will deal with
1878 * it when all refs have gone away. Accessing gl_object like
1879 * this is not safe in general. Here it is ok because we do
1880 * not dereference the pointer, and we only need an approx
1881 * answer to whether it is NULL or not.
1885 if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
1890 /* Limit reclaim to sensible number of tasks */
1891 if (found > NR_CPUS)
1895 rgd->rd_flags &= ~GFS2_RDF_CHECK;
1900 * gfs2_rgrp_congested - Use stats to figure out whether an rgrp is congested
1901 * @rgd: The rgrp in question
1902 * @loops: An indication of how picky we can be (0=very, 1=less so)
1904 * This function uses the recently added glock statistics in order to
1905 * figure out whether a particular resource group is suffering from
1906 * contention from multiple nodes. This is done purely on the basis
1907 * of timings, since this is the only data we have to work with and
1908 * our aim here is to reject a resource group which is highly contended
1909 * but (very important) not to do this too often in order to ensure that
1910 * we do not land up introducing fragmentation by changing resource
1911 * groups when not actually required.
1913 * The calculation is fairly simple, we want to know whether the SRTTB
1914 * (i.e. smoothed round trip time for blocking operations) to acquire
1915 * the lock for this rgrp's glock is significantly greater than the
1916 * time taken for resource groups on average. We introduce a margin in
1917 * the form of the variable @var which is computed as the sum of the two
1918 * respective variances, and multiplied by a factor depending on @loops
1919 * and whether we have a lot of data to base the decision on. This is
1920 * then tested against the square difference of the means in order to
1921 * decide whether the result is statistically significant or not.
1923 * Returns: A boolean verdict on the congestion status
1926 static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops)
1928 const struct gfs2_glock *gl = rgd->rd_gl;
1929 const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1930 struct gfs2_lkstats *st;
1931 u64 r_dcount, l_dcount;
1932 u64 l_srttb, a_srttb = 0;
1936 int cpu, nonzero = 0;
1939 for_each_present_cpu(cpu) {
1940 st = &per_cpu_ptr(sdp->sd_lkstats, cpu)->lkstats[LM_TYPE_RGRP];
1941 if (st->stats[GFS2_LKS_SRTTB]) {
1942 a_srttb += st->stats[GFS2_LKS_SRTTB];
1946 st = &this_cpu_ptr(sdp->sd_lkstats)->lkstats[LM_TYPE_RGRP];
1948 do_div(a_srttb, nonzero);
1949 r_dcount = st->stats[GFS2_LKS_DCOUNT];
1950 var = st->stats[GFS2_LKS_SRTTVARB] +
1951 gl->gl_stats.stats[GFS2_LKS_SRTTVARB];
1954 l_srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB];
1955 l_dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT];
1957 if ((l_dcount < 1) || (r_dcount < 1) || (a_srttb == 0))
1960 srttb_diff = a_srttb - l_srttb;
1961 sqr_diff = srttb_diff * srttb_diff;
1964 if (l_dcount < 8 || r_dcount < 8)
1969 return ((srttb_diff < 0) && (sqr_diff > var));
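/*
 * Illustrative sketch (not part of the original source) of the
 * significance test above, with the per-cpu averaging and the
 * sample-count / loop scaling of @var factored out: the rgrp is treated
 * as congested only when its own smoothed blocking round-trip time
 * exceeds the filesystem-wide average and the squared difference of the
 * two means is larger than the (scaled) sum of the variances.
 */
static inline bool example_congestion_verdict(s64 avg_srttb, s64 local_srttb,
					      s64 scaled_var)
{
	s64 diff = avg_srttb - local_srttb;

	return diff < 0 && diff * diff > scaled_var;
}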
1973 * gfs2_rgrp_used_recently
1974 * @rs: The block reservation with the rgrp to test
1975 * @msecs: The time limit in milliseconds
1977 * Returns: True if the rgrp glock has been used within the time limit
1979 static bool gfs2_rgrp_used_recently(const struct gfs2_blkreserv *rs,
1984 tdiff = ktime_to_ns(ktime_sub(ktime_get_real(),
1985 rs->rs_rbm.rgd->rd_gl->gl_dstamp));
1987 return tdiff > (msecs * 1000 * 1000);
1990 static u32 gfs2_orlov_skip(const struct gfs2_inode *ip)
1992 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1995 get_random_bytes(&skip, sizeof(skip));
1996 return skip % sdp->sd_rgrps;
1999 static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin)
2001 struct gfs2_rgrpd *rgd = *pos;
2002 struct gfs2_sbd *sdp = rgd->rd_sbd;
2004 rgd = gfs2_rgrpd_get_next(rgd);
2006 rgd = gfs2_rgrpd_get_first(sdp);
2008 if (rgd != begin) /* If we didn't wrap */
2014 * fast_to_acquire - determine if a resource group will be fast to acquire
2016 * If this is one of our preferred rgrps, it should be quicker to acquire,
2017 * because we tried to set ourselves up as dlm lock master.
2019 static inline int fast_to_acquire(struct gfs2_rgrpd *rgd)
2021 struct gfs2_glock *gl = rgd->rd_gl;
2023 if (gl->gl_state != LM_ST_UNLOCKED && list_empty(&gl->gl_holders) &&
2024 !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
2025 !test_bit(GLF_DEMOTE, &gl->gl_flags))
2027 if (rgd->rd_flags & GFS2_RDF_PREFERRED)
2033 * gfs2_inplace_reserve - Reserve space in the filesystem
2034 * @ip: the inode to reserve space for
2035 * @ap: the allocation parameters
2037 * We try our best to find an rgrp that has at least ap->target blocks
2038 * available. After a couple of passes (loops == 2), the prospects of finding
2039 * such an rgrp diminish. At this stage, we return the first rgrp that has
2040 * at least ap->min_target blocks available. Either way, we set ap->allowed to
2041 * the number of blocks available in the chosen rgrp.
2043 * Returns: 0 on success,
2044 * -ENOSPC if a suitable rgrp can't be found
2048 int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
2050 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2051 struct gfs2_rgrpd *begin = NULL;
2052 struct gfs2_blkreserv *rs = &ip->i_res;
2053 int error = 0, rg_locked, flags = 0;
2054 u64 last_unlinked = NO_BLOCK;
2056 u32 free_blocks, skip = 0;
2058 if (sdp->sd_args.ar_rgrplvb)
2060 if (gfs2_assert_warn(sdp, ap->target))
2062 if (gfs2_rs_active(rs)) {
2063 begin = rs->rs_rbm.rgd;
2064 } else if (rs->rs_rbm.rgd &&
2065 rgrp_contains_block(rs->rs_rbm.rgd, ip->i_goal)) {
2066 begin = rs->rs_rbm.rgd;
2068 check_and_update_goal(ip);
2069 rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
2071 if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV))
2072 skip = gfs2_orlov_skip(ip);
2073 if (rs->rs_rbm.rgd == NULL)
2079 if (!gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) {
2083 if (!gfs2_rs_active(rs)) {
2085 !fast_to_acquire(rs->rs_rbm.rgd))
2088 gfs2_rgrp_used_recently(rs, 1000) &&
2089 gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
2092 error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
2093 LM_ST_EXCLUSIVE, flags,
2095 if (unlikely(error))
2097 if (!gfs2_rs_active(rs) && (loops < 2) &&
2098 gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
2100 if (sdp->sd_args.ar_rgrplvb) {
2101 error = update_rgrp_lvb(rs->rs_rbm.rgd);
2102 if (unlikely(error)) {
2103 gfs2_glock_dq_uninit(&ip->i_rgd_gh);
2109 /* Skip unusable resource groups */
2110 if ((rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC |
2112 (loops == 0 && ap->target > rs->rs_rbm.rgd->rd_extfail_pt))
2115 if (sdp->sd_args.ar_rgrplvb)
2116 gfs2_rgrp_bh_get(rs->rs_rbm.rgd);
2118 /* Get a reservation if we don't already have one */
2119 if (!gfs2_rs_active(rs))
2120 rg_mblk_search(rs->rs_rbm.rgd, ip, ap);
2122 /* Skip rgrps when we can't get a reservation on first pass */
2123 if (!gfs2_rs_active(rs) && (loops < 1))
2126 /* If rgrp has enough free space, use it */
2127 free_blocks = rgd_free(rs->rs_rbm.rgd, rs);
2128 if (free_blocks >= ap->target ||
2129 (loops == 2 && ap->min_target &&
2130 free_blocks >= ap->min_target)) {
2131 ap->allowed = free_blocks;
2135 /* Check for unlinked inodes which can be reclaimed */
2136 if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK)
2137 try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked,
2140 /* Drop reservation, if we couldn't use reserved rgrp */
2141 if (gfs2_rs_active(rs))
2142 gfs2_rs_deltree(rs);
2144 /* Unlock rgrp if required */
2146 gfs2_glock_dq_uninit(&ip->i_rgd_gh);
2148 /* Find the next rgrp, and continue looking */
2149 if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin))
2154 /* If we've scanned all the rgrps, but found no free blocks
2155 * then this checks for some less likely conditions before
2159 /* Check that fs hasn't grown if writing to rindex */
2160 if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) {
2161 error = gfs2_ri_update(ip);
2165 /* Flushing the log may release space */
2167 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
2168 GFS2_LFC_INPLACE_RESERVE);
2175 * gfs2_inplace_release - release an inplace reservation
2176 * @ip: the inode the reservation was taken out on
2178 * Release a reservation made by gfs2_inplace_reserve().
2181 void gfs2_inplace_release(struct gfs2_inode *ip)
2183 if (gfs2_holder_initialized(&ip->i_rgd_gh))
2184 gfs2_glock_dq_uninit(&ip->i_rgd_gh);
2188 * gfs2_alloc_extent - allocate an extent from a given bitmap
2189 * @rbm: the resource group information
2190 * @dinode: TRUE if the first block we allocate is for a dinode
2191 * @n: The extent length (value/result)
2193 * Add the bitmap buffer to the transaction.
2194 * Set the found bits to @new_state to change block's allocation state.
2196 static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
2199 struct gfs2_rbm pos = { .rgd = rbm->rgd, };
2200 const unsigned int elen = *n;
2205 block = gfs2_rbm_to_block(rbm);
2206 gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm_bi(rbm)->bi_bh);
2207 gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
2210 ret = gfs2_rbm_from_block(&pos, block);
2211 if (ret || gfs2_testbit(&pos, true) != GFS2_BLKST_FREE)
2213 gfs2_trans_add_meta(pos.rgd->rd_gl, rbm_bi(&pos)->bi_bh);
2214 gfs2_setbit(&pos, true, GFS2_BLKST_USED);
2221 * rgblk_free - Change alloc state of given block(s)
2222 * @sdp: the filesystem
2223 * @rgd: the resource group the blocks are in
2224 * @bstart: the start of a run of blocks to free
2225 * @blen: the length of the block run (all must lie within ONE RG!)
2226 * @new_state: GFS2_BLKST_XXX the after-allocation block state
2229 static void rgblk_free(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd,
2230 u64 bstart, u32 blen, unsigned char new_state)
2232 struct gfs2_rbm rbm;
2233 struct gfs2_bitmap *bi, *bi_prev = NULL;
2236 if (WARN_ON_ONCE(gfs2_rbm_from_block(&rbm, bstart)))
2240 if (bi != bi_prev) {
2241 if (!bi->bi_clone) {
2242 bi->bi_clone = kmalloc(bi->bi_bh->b_size,
2243 GFP_NOFS | __GFP_NOFAIL);
2244 memcpy(bi->bi_clone + bi->bi_offset,
2245 bi->bi_bh->b_data + bi->bi_offset,
2248 gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
2251 gfs2_setbit(&rbm, false, new_state);
2252 gfs2_rbm_incr(&rbm);
2257 * gfs2_rgrp_dump - print out an rgrp
2258 * @seq: The iterator
2259 * @gl: The glock in question
2260 * @fs_id_buf: pointer to file system id (if requested)
2264 void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_glock *gl,
2265 const char *fs_id_buf)
2267 struct gfs2_rgrpd *rgd = gl->gl_object;
2268 struct gfs2_blkreserv *trs;
2269 const struct rb_node *n;
2273 gfs2_print_dbg(seq, "%s R: n:%llu f:%02x b:%u/%u i:%u r:%u e:%u\n",
2275 (unsigned long long)rgd->rd_addr, rgd->rd_flags,
2276 rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
2277 rgd->rd_reserved, rgd->rd_extfail_pt);
2278 if (rgd->rd_sbd->sd_args.ar_rgrplvb) {
2279 struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
2281 gfs2_print_dbg(seq, "%s L: f:%02x b:%u i:%u\n", fs_id_buf,
2282 be32_to_cpu(rgl->rl_flags),
2283 be32_to_cpu(rgl->rl_free),
2284 be32_to_cpu(rgl->rl_dinodes));
2286 spin_lock(&rgd->rd_rsspin);
2287 for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
2288 trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
2289 dump_rs(seq, trs, fs_id_buf);
2291 spin_unlock(&rgd->rd_rsspin);
2294 static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
2296 struct gfs2_sbd *sdp = rgd->rd_sbd;
2297 char fs_id_buf[sizeof(sdp->sd_fsname) + 7];
2299 fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n",
2300 (unsigned long long)rgd->rd_addr);
2301 fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n");
2302 sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
2303 gfs2_rgrp_dump(NULL, rgd->rd_gl, fs_id_buf);
2304 rgd->rd_flags |= GFS2_RDF_ERROR;
/**
 * gfs2_adjust_reservation - Adjust (or remove) a reservation after allocation
 * @ip: The inode we have just allocated blocks for
 * @rbm: The start of the allocated blocks
 * @len: The extent length
 *
 * Adjusts a reservation after an allocation has taken place. If the
 * reservation does not match the allocation, or if it is now empty,
 * then it is removed.
 */
static void gfs2_adjust_reservation(struct gfs2_inode *ip,
				    const struct gfs2_rbm *rbm, unsigned len)
{
	struct gfs2_blkreserv *rs = &ip->i_res;
	struct gfs2_rgrpd *rgd = rbm->rgd;
	unsigned rlen;
	u64 block;
	int ret;

	spin_lock(&rgd->rd_rsspin);
	if (gfs2_rs_active(rs)) {
		if (gfs2_rbm_eq(&rs->rs_rbm, rbm)) {
			block = gfs2_rbm_to_block(rbm);
			ret = gfs2_rbm_from_block(&rs->rs_rbm, block + len);
			rlen = min(rs->rs_free, len);
			rs->rs_free -= rlen;
			rgd->rd_reserved -= rlen;
			trace_gfs2_rs(rs, TRACE_RS_CLAIM);
			if (rs->rs_free && !ret)
				goto out;
			/* We used up our block reservation, so we should
			   reserve more blocks next time. */
			atomic_add(RGRP_RSRV_ADDBLKS, &ip->i_sizehint);
		}
		__rs_deltree(rs);
	}
out:
	spin_unlock(&rgd->rd_rsspin);
}

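/*
 * Exhausting a reservation bumps i_sizehint by RGRP_RSRV_ADDBLKS above;
 * the intent, presumably, is that the next multi-block reservation made for
 * this inode is sized more generously, reducing repeat trips through the
 * allocator for streaming writers.
 */
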
/**
 * gfs2_set_alloc_start - Set starting point for block allocation
 * @rbm: The rbm which will be set to the required location
 * @ip: The gfs2 inode
 * @dinode: Flag to say if allocation includes a new inode
 *
 * This sets the starting point from the reservation if one is active;
 * otherwise it falls back to guessing a start point based on the
 * inode's goal block or the last allocation point in the rgrp.
 */
static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
				 const struct gfs2_inode *ip, bool dinode)
{
	u64 goal;

	if (gfs2_rs_active(&ip->i_res)) {
		*rbm = ip->i_res.rs_rbm;
		return;
	}
	if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal))
		goal = ip->i_goal;
	else
		goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;
	if (WARN_ON_ONCE(gfs2_rbm_from_block(rbm, goal))) {
		rbm->bii = 0;
		rbm->offset = 0;
	}
}

/**
 * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
 * @ip: the inode to allocate the block for
 * @bn: Used to return the starting block number
 * @nblocks: requested number of blocks/extent length (value/result)
 * @dinode: 1 if we're allocating a dinode block, else 0
 * @generation: the generation number of the inode
 *
 * Returns: 0 or error
 */
int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
		      bool dinode, u64 *generation)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	struct gfs2_rbm rbm = { .rgd = ip->i_res.rs_rbm.rgd, };
	unsigned int ndata;
	u64 block; /* block, within the file system scope */
	int error;

	gfs2_set_alloc_start(&rbm, ip, dinode);
	error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, ip, false);

	if (error == -ENOSPC) {
		gfs2_set_alloc_start(&rbm, ip, dinode);
		error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, NULL, false);
	}

	/* Since all blocks are reserved in advance, this shouldn't happen */
	if (error) {
		fs_warn(sdp, "inum=%llu error=%d, nblocks=%u, full=%d fail_pt=%d\n",
			(unsigned long long)ip->i_no_addr, error, *nblocks,
			test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags),
			rbm.rgd->rd_extfail_pt);
		goto rgrp_error;
	}

	gfs2_alloc_extent(&rbm, dinode, nblocks);
	block = gfs2_rbm_to_block(&rbm);
	rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0;
	if (gfs2_rs_active(&ip->i_res))
		gfs2_adjust_reservation(ip, &rbm, *nblocks);
	ndata = *nblocks;
	if (dinode)
		ndata--;

	if (!dinode) {
		ip->i_goal = block + ndata - 1;
		error = gfs2_meta_inode_buffer(ip, &dibh);
		if (error == 0) {
			struct gfs2_dinode *di =
				(struct gfs2_dinode *)dibh->b_data;
			gfs2_trans_add_meta(ip->i_gl, dibh);
			di->di_goal_meta = di->di_goal_data =
				cpu_to_be64(ip->i_goal);
			brelse(dibh);
		}
	}
	if (rbm.rgd->rd_free < *nblocks) {
		fs_warn(sdp, "nblocks=%u\n", *nblocks);
		goto rgrp_error;
	}

	rbm.rgd->rd_free -= *nblocks;
	if (dinode) {
		rbm.rgd->rd_dinodes++;
		*generation = rbm.rgd->rd_igeneration++;
		if (*generation == 0)
			*generation = rbm.rgd->rd_igeneration++;
	}

	gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh);
	gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);

	gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
	if (dinode)
		gfs2_trans_remove_revoke(sdp, block, *nblocks);

	gfs2_quota_change(ip, *nblocks, ip->i_inode.i_uid, ip->i_inode.i_gid);

	rbm.rgd->rd_free_clone -= *nblocks;
	trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
			       dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
	*bn = block;
	return 0;

rgrp_error:
	gfs2_rgrp_error(rbm.rgd);
	return -EIO;
}

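/*
 * Sketch of a typical caller sequence (illustrative only; details vary by
 * call site, and ap, bn, n, rg_blocks and dinode_blocks below are placeholder
 * locals): quota and rgrp reservations are taken and the transaction opened
 * before gfs2_alloc_blocks() is called.
 *
 *	error = gfs2_quota_lock_check(ip, &ap);
 *	...
 *	error = gfs2_inplace_reserve(ip, &ap);
 *	...
 *	error = gfs2_trans_begin(sdp, rg_blocks + dinode_blocks, 0);
 *	...
 *	error = gfs2_alloc_blocks(ip, &bn, &n, false, NULL);
 *	...
 *	gfs2_trans_end(sdp);
 *	gfs2_inplace_release(ip);
 *	gfs2_quota_unlock(ip);
 */
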
/**
 * __gfs2_free_blocks - free a contiguous run of block(s)
 * @ip: the inode these blocks are being freed from
 * @rgd: the resource group the blocks are in
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 * @meta: 1 if the blocks represent metadata
 *
 */
void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
			u64 bstart, u32 blen, int meta)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	rgblk_free(sdp, rgd, bstart, blen, GFS2_BLKST_FREE);
	trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
	rgd->rd_free += blen;
	rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);

	/* Directories keep their data in the metadata address space */
	if (meta || ip->i_depth)
		gfs2_meta_wipe(ip, bstart, blen);
}

/**
 * gfs2_free_meta - free a contiguous run of metadata block(s)
 * @ip: the inode these blocks are being freed from
 * @rgd: the resource group the blocks are in
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 *
 */
void gfs2_free_meta(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
		    u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	__gfs2_free_blocks(ip, rgd, bstart, blen, 1);
	gfs2_statfs_change(sdp, 0, +blen, 0);
	gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
}

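/*
 * Note the split in accounting: __gfs2_free_blocks() only updates the rgrp
 * itself, and its callers are expected to apply the matching statfs and
 * quota changes, whereas gfs2_free_meta() performs that accounting here on
 * behalf of the caller.
 */
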
void gfs2_unlink_di(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_rgrpd *rgd;
	u64 blkno = ip->i_no_addr;

	rgd = gfs2_blk2rgrpd(sdp, blkno, true);
	if (!rgd)
		return;
	rgblk_free(sdp, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
	trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	be32_add_cpu(&rgd->rd_rgl->rl_unlinked, 1);
}

void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;

	rgblk_free(sdp, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
	if (!rgd->rd_dinodes)
		gfs2_consist_rgrpd(rgd);
	rgd->rd_dinodes--;
	rgd->rd_free++;

	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	be32_add_cpu(&rgd->rd_rgl->rl_unlinked, -1);

	gfs2_statfs_change(sdp, 0, +1, -1);
	trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
	gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
	gfs2_meta_wipe(ip, ip->i_no_addr, 1);
}

/**
 * gfs2_check_blk_type - Check the type of a block
 * @sdp: The superblock
 * @no_addr: The block number to check
 * @type: The block type we are looking for
 *
 * Returns: 0 if the block type matches the expected type
 *          -ESTALE if it doesn't match
 *          or -ve errno if something went wrong while checking
 */
int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder rgd_gh;
	struct gfs2_rbm rbm;
	int error = -EINVAL;

	rgd = gfs2_blk2rgrpd(sdp, no_addr, 1);
	if (!rgd)
		goto fail;

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh);
	if (error)
		goto fail;

	rbm.rgd = rgd;
	error = gfs2_rbm_from_block(&rbm, no_addr);
	if (!WARN_ON_ONCE(error)) {
		if (gfs2_testbit(&rbm, false) != type)
			error = -ESTALE;
	}

	gfs2_glock_dq_uninit(&rgd_gh);
fail:
	return error;
}

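/*
 * This is used, for example, when an inode is looked up by block number (as
 * for NFS file handles) to confirm that the block in question is still
 * allocated as a dinode before an in-core inode is built for it.
 */
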
/**
 * gfs2_rlist_add - add a RG to a list of RGs
 * @ip: the inode
 * @rlist: the list of resource groups
 * @block: the block
 *
 * Figure out what RG a block belongs to and add that RG to the list
 *
 * FIXME: Don't use NOFAIL
 *
 */
void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
		    u64 block)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrpd **tmp;
	unsigned int new_space;
	unsigned int x;

	if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
		return;

	/*
	 * The resource group last accessed is kept in the last position.
	 */
	if (rlist->rl_rgrps) {
		rgd = rlist->rl_rgd[rlist->rl_rgrps - 1];
		if (rgrp_contains_block(rgd, block))
			return;
		rgd = gfs2_blk2rgrpd(sdp, block, 1);
	} else {
		rgd = ip->i_res.rs_rbm.rgd;
		if (!rgd || !rgrp_contains_block(rgd, block))
			rgd = gfs2_blk2rgrpd(sdp, block, 1);
	}

	if (!rgd) {
		fs_err(sdp, "rlist_add: no rgrp for block %llu\n",
		       (unsigned long long)block);
		return;
	}

	for (x = 0; x < rlist->rl_rgrps; x++) {
		if (rlist->rl_rgd[x] == rgd) {
			swap(rlist->rl_rgd[x],
			     rlist->rl_rgd[rlist->rl_rgrps - 1]);
			return;
		}
	}

	if (rlist->rl_rgrps == rlist->rl_space) {
		new_space = rlist->rl_space + 10;
		tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
			      GFP_NOFS | __GFP_NOFAIL);
		if (rlist->rl_rgd) {
			memcpy(tmp, rlist->rl_rgd,
			       rlist->rl_space * sizeof(struct gfs2_rgrpd *));
			kfree(rlist->rl_rgd);
		}
		rlist->rl_space = new_space;
		rlist->rl_rgd = tmp;
	}

	rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
}

/**
 * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
 *      and initialize an array of glock holders for them
 * @rlist: the list of resource groups
 *
 * FIXME: Don't use NOFAIL
 *
 */
void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist)
{
	unsigned int x;

	rlist->rl_ghs = kmalloc_array(rlist->rl_rgrps,
				      sizeof(struct gfs2_holder),
				      GFP_NOFS | __GFP_NOFAIL);
	for (x = 0; x < rlist->rl_rgrps; x++)
		gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
				 LM_ST_EXCLUSIVE, 0,
				 &rlist->rl_ghs[x]);
}

/**
 * gfs2_rlist_free - free a resource group list
 * @rlist: the list of resource groups
 *
 */
void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
{
	unsigned int x;

	kfree(rlist->rl_rgd);

	if (rlist->rl_ghs) {
		for (x = 0; x < rlist->rl_rgrps; x++)
			gfs2_holder_uninit(&rlist->rl_ghs[x]);
		kfree(rlist->rl_ghs);
		rlist->rl_ghs = NULL;
	}
}

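/*
 * Rough usage pattern for the rlist helpers (illustrative; the loop and
 * variable names are placeholders): build up the list with gfs2_rlist_add()
 * for every block that will be touched, lock all the rgrps in one go, do the
 * work, then tear the list down again.
 *
 *	struct gfs2_rgrp_list rlist;
 *
 *	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
 *	for (each block to be deallocated)
 *		gfs2_rlist_add(ip, &rlist, block);
 *	gfs2_rlist_alloc(&rlist);
 *	gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
 *	... free the blocks under an open transaction ...
 *	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
 *	gfs2_rlist_free(&rlist);
 */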