/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/prefetch.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/random.h>

#include "trace_gfs2.h"
#define BFITNOENT ((u32)~0)
#define NO_BLOCK ((u64)~0)

#if BITS_PER_LONG == 32
#define LBITMASK   (0x55555555UL)
#define LBITSKIP55 (0x55555555UL)
#define LBITSKIP00 (0x00000000UL)
#else
#define LBITMASK   (0x5555555555555555UL)
#define LBITSKIP55 (0x5555555555555555UL)
#define LBITSKIP00 (0x0000000000000000UL)
#endif
/*
 * These routines are used by the resource group routines (rgrp.c)
 * to keep track of block allocation. Each block is represented by two
 * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks.
 *
 * 0 = Free
 * 1 = Used (not metadata)
 * 2 = Unlinked (still in use) inode
 * 3 = Used (metadata)
 */
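/*
 * Illustrative sketch (not part of the original file): how one of the
 * two-bit states above is read back out of a packed bitmap byte.
 * GFS2_NBBY, GFS2_BIT_SIZE and GFS2_BIT_MASK come from gfs2_ondisk.h;
 * the function name and parameters are invented for the example.
 */
static inline u8 example_block_state(const u8 *bitmap, u32 blk)
{
	const u8 *byte = bitmap + (blk / GFS2_NBBY);	/* 4 blocks per byte */
	unsigned int bit = (blk % GFS2_NBBY) * GFS2_BIT_SIZE; /* 2 bits each */

	return (*byte >> bit) & GFS2_BIT_MASK;	/* one of the states above */
}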
struct gfs2_extent {
	struct gfs2_rbm rbm;
	u32 len;
};

static const char valid_change[16] = {
	        /* current */
	/* n */ 0, 1, 1, 1,
	/* e */ 1, 0, 0, 0,
	/* w */ 0, 0, 0, 1,
	        1, 0, 0, 0
};

static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
			 const struct gfs2_inode *ip, bool nowrap);
/**
 * gfs2_setbit - Set a bit in the bitmaps
 * @rbm: The position of the bit to set
 * @do_clone: Also set the clone bitmap, if it exists
 * @new_state: the new state of the block
 *
 */

static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
			       unsigned char new_state)
{
	unsigned char *byte1, *byte2, *end, cur_state;
	struct gfs2_bitmap *bi = rbm_bi(rbm);
	unsigned int buflen = bi->bi_len;
	const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;

	byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY);
	end = bi->bi_bh->b_data + bi->bi_offset + buflen;

	BUG_ON(byte1 >= end);

	cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;

	if (unlikely(!valid_change[new_state * 4 + cur_state])) {
		pr_warn("buf_blk = 0x%x old_state=%d, new_state=%d\n",
			rbm->offset, cur_state, new_state);
		pr_warn("rgrp=0x%llx bi_start=0x%x\n",
			(unsigned long long)rbm->rgd->rd_addr, bi->bi_start);
		pr_warn("bi_offset=0x%x bi_len=0x%x\n",
			bi->bi_offset, bi->bi_len);
		dump_stack();
		gfs2_consist_rgrpd(rbm->rgd);
		return;
	}
	*byte1 ^= (cur_state ^ new_state) << bit;

	if (do_clone && bi->bi_clone) {
		byte2 = bi->bi_clone + bi->bi_offset + (rbm->offset / GFS2_NBBY);
		cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
		*byte2 ^= (cur_state ^ new_state) << bit;
	}
}
/**
 * gfs2_testbit - test a bit in the bitmaps
 * @rbm: The bit to test
 * @use_clone: If true, test the clone bitmap, not the official bitmap.
 *
 * Some callers like gfs2_unaligned_extlen need to test the clone bitmaps,
 * not the "real" bitmaps, to avoid allocating recently freed blocks.
 *
 * Returns: The two bit block state of the requested bit
 */

static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm, bool use_clone)
{
	struct gfs2_bitmap *bi = rbm_bi(rbm);
	const u8 *buffer;
	const u8 *byte;
	unsigned int bit;

	if (use_clone && bi->bi_clone)
		buffer = bi->bi_clone;
	else
		buffer = bi->bi_bh->b_data;
	buffer += bi->bi_offset;
	byte = buffer + (rbm->offset / GFS2_NBBY);
	bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;

	return (*byte >> bit) & GFS2_BIT_MASK;
}
/**
 * gfs2_bit_search
 * @ptr: Pointer to bitmap data
 * @mask: Mask to use (normally 0x55555.... but adjusted for search start)
 * @state: The state we are searching for
 *
 * We xor the bitmap data with a pattern which is the bitwise opposite
 * of what we are looking for; this gives rise to a pattern of ones
 * wherever there is a match. Since we have two bits per entry, we
 * take this pattern, shift it down by one place and then AND it with
 * the original. All the even bit positions (0, 2, 4, etc.) then represent
 * successful matches, so we mask with 0x55555..... to remove the unwanted
 * odd bit positions.
 *
 * This allows searching of a whole u64 at once (32 blocks) with a
 * single test (on 64 bit arches).
 */
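/*
 * Worked example (illustrative values): take one bitmap byte 0x4E, i.e.
 * the bit-pairs 01 00 11 10 for blocks 3..0, and search for state 2
 * (binary 10). XOR with search[2] = 0x55... gives 00 01 10 11: only the
 * matching entry (block 0) now reads 11. Then (tmp & (tmp >> 1)) & mask
 * keeps just the entries with both bits set, leaving 00 00 00 01, ready
 * for a find-first-set scan in gfs2_bitfit() below.
 */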
static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
{
	u64 tmp;
	static const u64 search[] = {
		[0] = 0xffffffffffffffffULL,
		[1] = 0xaaaaaaaaaaaaaaaaULL,
		[2] = 0x5555555555555555ULL,
		[3] = 0x0000000000000000ULL,
	};
	tmp = le64_to_cpu(*ptr) ^ search[state];
	tmp &= (tmp >> 1);
	tmp &= mask;
	return tmp;
}
/**
 * rs_cmp - multi-block reservation range compare
 * @blk: absolute file system block number of the new reservation
 * @len: number of blocks in the new reservation
 * @rs: existing reservation to compare against
 *
 * returns: 1 if the block range is beyond the reach of the reservation
 *         -1 if the block range is before the start of the reservation
 *          0 if the block range overlaps with the reservation
 */
static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
{
	u64 startblk = gfs2_rbm_to_block(&rs->rs_rbm);

	if (blk >= startblk + rs->rs_free)
		return 1;
	if (blk + len - 1 < startblk)
		return -1;
	return 0;
}
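/*
 * Example (illustrative numbers): for a reservation starting at block
 * 1000 with rs_free = 16 (blocks 1000..1015), rs_cmp(1016, 4, rs)
 * returns 1 (entirely beyond it), rs_cmp(990, 10, rs) returns -1 (ends
 * at 999, before the start), and rs_cmp(998, 4, rs) returns 0 (blocks
 * 998..1001 overlap). This ordering is what lets rs_insert() and
 * gfs2_next_unreserved_block() walk the reservation rb-tree.
 */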
/**
 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
 *               a block in a given allocation state.
 * @buf: the buffer that holds the bitmaps
 * @len: the length (in bytes) of the buffer
 * @goal: start search at this block's bit-pair (within @buf)
 * @state: GFS2_BLKST_XXX the state of the block we're looking for.
 *
 * Scope of @goal and returned block number is only within this bitmap buffer,
 * not entire rgrp or filesystem. @buf will be offset from the actual
 * beginning of a bitmap block buffer, skipping any header structures, but
 * headers are always a multiple of 64 bits long so that the buffer is
 * always aligned to a 64 bit boundary.
 *
 * The size of the buffer is in bytes, but it is assumed that it is
 * always ok to read a complete multiple of 64 bits at the end
 * of the block in case the end is not aligned to a natural boundary.
 *
 * Return: the block number (bitmap buffer scope) that was found
 */
static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
		       u32 goal, u8 state)
{
	u32 spoint = (goal << 1) & ((8*sizeof(u64)) - 1);
	const __le64 *ptr = ((__le64 *)buf) + (goal >> 5);
	const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64)));
	u64 tmp;
	u64 mask = 0x5555555555555555ULL;
	u32 bit;

	/* Mask off bits we don't care about at the start of the search */
	mask <<= spoint;
	tmp = gfs2_bit_search(ptr, mask, state);
	ptr++;
	while (tmp == 0 && ptr < end) {
		tmp = gfs2_bit_search(ptr, 0x5555555555555555ULL, state);
		ptr++;
	}
	/* Mask off any bits which are more than len bytes from the start */
	if (ptr == end && (len & (sizeof(u64) - 1)))
		tmp &= (((u64)~0) >> (64 - 8*(len & (sizeof(u64) - 1))));
	/* Didn't find anything, so return */
	if (tmp == 0)
		return BFITNOENT;
	ptr--;
	bit = __ffs64(tmp);
	bit /= 2;	/* two bits per entry in the bitmap */
	return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
}
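/*
 * Example of the arithmetic above (illustrative): each u64 covers 32
 * bit-pairs, so goal = 70 starts the scan at ptr = buf + (70 >> 5), the
 * third u64, with spoint = (70 << 1) & 63 = 12; shifting the 0x5555...
 * mask left by 12 hides the six bit-pairs below the goal, so the first
 * probe cannot report a match before @goal.
 */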
/**
 * gfs2_rbm_from_block - Set the rbm based upon rgd and block number
 * @rbm: The rbm with rgd already set correctly
 * @block: The block number (filesystem relative)
 *
 * This sets the bi and offset members of an rbm based on a
 * resource group and a filesystem relative block number. The
 * resource group must be set in the rbm on entry, the bi and
 * offset members will be set by this function.
 *
 * Returns: 0 on success, or an error code
 */
static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
{
	u64 rblock = block - rbm->rgd->rd_data0;

	if (WARN_ON_ONCE(rblock > UINT_MAX))
		return -EINVAL;
	if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
		return -E2BIG;

	rbm->bii = 0;
	rbm->offset = (u32)(rblock);
	/* Check if the block is within the first block */
	if (rbm->offset < rbm_bi(rbm)->bi_blocks)
		return 0;

	/* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
	rbm->offset += (sizeof(struct gfs2_rgrp) -
			sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
	rbm->bii = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
	rbm->offset -= rbm->bii * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
	return 0;
}
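/*
 * Example (illustrative numbers): if the first bitmap holds bi_blocks =
 * 4000 entries while sd_blocks_per_bitmap = 4064, then rblock = 10000
 * fails the bi_blocks test, gains the 64-entry header adjustment that
 * pads the short first bitmap up to a full one, and lands at
 * bii = 10064 / 4064 = 2 with offset = 10064 - 2 * 4064 = 1936.
 */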
/**
 * gfs2_rbm_incr - increment an rbm structure
 * @rbm: The rbm with rgd already set correctly
 *
 * This function takes an existing rbm structure and increments it to the next
 * viable block offset.
 *
 * Returns: If incrementing the offset would cause the rbm to go past the
 *          end of the rgrp, true is returned, otherwise false.
 */
static bool gfs2_rbm_incr(struct gfs2_rbm *rbm)
{
	if (rbm->offset + 1 < rbm_bi(rbm)->bi_blocks) { /* in the same bitmap */
		rbm->offset++;
		return false;
	}
	if (rbm->bii == rbm->rgd->rd_length - 1) /* at the last bitmap */
		return true;

	rbm->offset = 0;
	rbm->bii++;
	return false;
}
/**
 * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
 * @rbm: Position to search (value/result)
 * @n_unaligned: Number of unaligned blocks to check
 * @len: Decremented for each block found (terminate on zero)
 *
 * Returns: true if a non-free block is encountered
 */
static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
{
	u32 n;
	u8 res;

	for (n = 0; n < n_unaligned; n++) {
		res = gfs2_testbit(rbm, true);
		if (res != GFS2_BLKST_FREE)
			return true;
		(*len)--;
		if (*len == 0)
			return true;
		if (gfs2_rbm_incr(rbm))
			return true;
	}

	return false;
}
/**
 * gfs2_free_extlen - Return extent length of free blocks
 * @rrbm: Starting position
 * @len: Max length to check
 *
 * Starting at the block specified by the rbm, see how many free blocks
 * there are, not reading more than len blocks ahead. This can be done
 * using memchr_inv when the blocks are byte aligned, but has to be done
 * on a block by block basis in case of unaligned blocks. Also this
 * function can cope with bitmap boundaries (although it must stop on
 * a resource group boundary)
 *
 * Returns: Number of free blocks in the extent
 */
static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
{
	struct gfs2_rbm rbm = *rrbm;
	u32 n_unaligned = rbm.offset & 3;
	u32 size = len;
	u32 bytes;
	u32 chunk_size;
	u8 *ptr, *start, *end;
	u64 block;
	struct gfs2_bitmap *bi;

	if (n_unaligned &&
	    gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
		goto out;

	n_unaligned = len & 3;
	/* Start is now byte aligned */
	while (len > 3) {
		bi = rbm_bi(&rbm);
		start = bi->bi_bh->b_data;
		if (bi->bi_clone)
			start = bi->bi_clone;
		start += bi->bi_offset;
		end = start + bi->bi_len;
		BUG_ON(rbm.offset & 3);
		start += (rbm.offset / GFS2_NBBY);
		bytes = min_t(u32, len / GFS2_NBBY, (end - start));
		ptr = memchr_inv(start, 0, bytes);
		chunk_size = ((ptr == NULL) ? bytes : (ptr - start));
		chunk_size *= GFS2_NBBY;
		BUG_ON(len < chunk_size);
		len -= chunk_size;
		block = gfs2_rbm_to_block(&rbm);
		if (gfs2_rbm_from_block(&rbm, block + chunk_size)) {
			n_unaligned = 0;
			break;
		}
		if (ptr) {
			n_unaligned = 3;
			break;
		}
		n_unaligned = len & 3;
	}

	/* Deal with any bits left over at the end */
	if (n_unaligned)
		gfs2_unaligned_extlen(&rbm, n_unaligned, &len);
out:
	return size - len;
}
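/*
 * Example (illustrative): a fully free byte is 0x00, so memchr_inv() can
 * skip four free blocks per byte; if it stops at a non-zero byte, the
 * extent counted so far is (ptr - start) * GFS2_NBBY blocks, and any
 * remaining sub-byte run is finished off block by block through
 * gfs2_unaligned_extlen() above.
 */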
/**
 * gfs2_bitcount - count the number of bits in a certain state
 * @rgd: the resource group descriptor
 * @buffer: the buffer that holds the bitmaps
 * @buflen: the length (in bytes) of the buffer
 * @state: the state of the block we're looking for
 *
 * Returns: The number of bits
 */
static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
			 unsigned int buflen, u8 state)
{
	const u8 *byte = buffer;
	const u8 *end = buffer + buflen;
	const u8 state1 = state << 2;
	const u8 state2 = state << 4;
	const u8 state3 = state << 6;
	u32 count = 0;

	for (; byte < end; byte++) {
		if (((*byte) & 0x03) == state)
			count++;
		if (((*byte) & 0x0C) == state1)
			count++;
		if (((*byte) & 0x30) == state2)
			count++;
		if (((*byte) & 0xC0) == state3)
			count++;
	}

	return count;
}
/**
 * gfs2_rgrp_verify - Verify that a resource group is consistent
 * @rgd: the rgrp
 *
 */

void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi = NULL;
	u32 length = rgd->rd_length;
	u32 count[4], tmp;
	int buf, x;

	memset(count, 0, 4 * sizeof(u32));

	/* Count # blocks in each of 4 possible allocation states */
	for (buf = 0; buf < length; buf++) {
		bi = rgd->rd_bits + buf;
		for (x = 0; x < 4; x++)
			count[x] += gfs2_bitcount(rgd,
						  bi->bi_bh->b_data +
						  bi->bi_offset,
						  bi->bi_len, x);
	}

	if (count[0] != rgd->rd_free) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "free data mismatch: %u != %u\n",
			       count[0], rgd->rd_free);
		return;
	}

	tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
	if (count[1] != tmp) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used data mismatch: %u != %u\n",
			       count[1], tmp);
		return;
	}

	if (count[2] + count[3] != rgd->rd_dinodes) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used metadata mismatch: %u != %u\n",
			       count[2] + count[3], rgd->rd_dinodes);
		return;
	}
}
/**
 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
 * @sdp: The GFS2 superblock
 * @blk: The data block number
 * @exact: True if this needs to be an exact match
 *
 * The @exact argument should be set to true by most callers. The exception
 * is when we need to match blocks which are not represented by the rgrp
 * bitmap, but which are part of the rgrp (i.e. padding blocks) which are
 * there for alignment purposes. Another way of looking at it is that @exact
 * matches only valid data/metadata blocks, but with @exact false, it will
 * match any block within the extent of the rgrp.
 *
 * Returns: The resource group, or NULL if not found
 */
struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact)
{
	struct rb_node *n, *next;
	struct gfs2_rgrpd *cur;

	spin_lock(&sdp->sd_rindex_spin);
	n = sdp->sd_rindex_tree.rb_node;
	while (n) {
		cur = rb_entry(n, struct gfs2_rgrpd, rd_node);
		next = NULL;
		if (blk < cur->rd_addr)
			next = n->rb_left;
		else if (blk >= cur->rd_data0 + cur->rd_data)
			next = n->rb_right;
		if (next == NULL) {
			spin_unlock(&sdp->sd_rindex_spin);
			if (exact) {
				if (blk < cur->rd_addr)
					return NULL;
				if (blk >= cur->rd_data0 + cur->rd_data)
					return NULL;
			}
			return cur;
		}
		n = next;
	}
	spin_unlock(&sdp->sd_rindex_spin);

	return NULL;
}
/**
 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
 * @sdp: The GFS2 superblock
 *
 * Returns: The first rgrp in the filesystem
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
{
	const struct rb_node *n;
	struct gfs2_rgrpd *rgd;

	spin_lock(&sdp->sd_rindex_spin);
	n = rb_first(&sdp->sd_rindex_tree);
	rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
	spin_unlock(&sdp->sd_rindex_spin);

	return rgd;
}
/**
 * gfs2_rgrpd_get_next - get the next RG
 * @rgd: the resource group descriptor
 *
 * Returns: The next rgrp
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	const struct rb_node *n;

	spin_lock(&sdp->sd_rindex_spin);
	n = rb_next(&rgd->rd_node);
	if (n == NULL)
		n = rb_first(&sdp->sd_rindex_tree);

	if (unlikely(&rgd->rd_node == n)) {
		spin_unlock(&sdp->sd_rindex_spin);
		return NULL;
	}
	rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
	spin_unlock(&sdp->sd_rindex_spin);
	return rgd;
}
void check_and_update_goal(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	if (!ip->i_goal || gfs2_blk2rgrpd(sdp, ip->i_goal, 1) == NULL)
		ip->i_goal = ip->i_no_addr;
}
void gfs2_free_clones(struct gfs2_rgrpd *rgd)
{
	int x;

	for (x = 0; x < rgd->rd_length; x++) {
		struct gfs2_bitmap *bi = rgd->rd_bits + x;
		kfree(bi->bi_clone);
		bi->bi_clone = NULL;
	}
}
/**
 * gfs2_rsqa_alloc - make sure we have a reservation assigned to the inode
 *                   plus a quota allocations data structure, if necessary
 * @ip: the inode for this reservation
 */
int gfs2_rsqa_alloc(struct gfs2_inode *ip)
{
	return gfs2_qa_alloc(ip);
}
static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs)
{
	struct gfs2_inode *ip = container_of(rs, struct gfs2_inode, i_res);

	gfs2_print_dbg(seq, " B: n:%llu s:%llu b:%u f:%u\n",
		       (unsigned long long)ip->i_no_addr,
		       (unsigned long long)gfs2_rbm_to_block(&rs->rs_rbm),
		       rs->rs_rbm.offset, rs->rs_free);
}
/**
 * __rs_deltree - remove a multi-block reservation from the rgd tree
 * @rs: The reservation to remove
 *
 */
static void __rs_deltree(struct gfs2_blkreserv *rs)
{
	struct gfs2_rgrpd *rgd;

	if (!gfs2_rs_active(rs))
		return;

	rgd = rs->rs_rbm.rgd;
	trace_gfs2_rs(rs, TRACE_RS_TREEDEL);
	rb_erase(&rs->rs_node, &rgd->rd_rstree);
	RB_CLEAR_NODE(&rs->rs_node);

	if (rs->rs_free) {
		u64 last_block = gfs2_rbm_to_block(&rs->rs_rbm) +
				 rs->rs_free - 1;
		struct gfs2_rbm last_rbm = { .rgd = rs->rs_rbm.rgd, };
		struct gfs2_bitmap *start, *last;

		/* return reserved blocks to the rgrp */
		BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
		rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
		/* The rgrp extent failure point is likely not to increase;
		   it will only do so if the freed blocks are somehow
		   contiguous with a span of free blocks that follows. Still,
		   it will force the number to be recalculated later. */
		rgd->rd_extfail_pt += rs->rs_free;
		rs->rs_free = 0;
		if (gfs2_rbm_from_block(&last_rbm, last_block))
			return;
		start = rbm_bi(&rs->rs_rbm);
		last = rbm_bi(&last_rbm);
		do
			clear_bit(GBF_FULL, &start->bi_flags);
		while (start++ != last);
	}
}
/**
 * gfs2_rs_deltree - remove a multi-block reservation from the rgd tree
 * @rs: The reservation to remove
 *
 */

void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
{
	struct gfs2_rgrpd *rgd;

	rgd = rs->rs_rbm.rgd;
	if (rgd) {
		spin_lock(&rgd->rd_rsspin);
		__rs_deltree(rs);
		BUG_ON(rs->rs_free);
		spin_unlock(&rgd->rd_rsspin);
	}
}
/**
 * gfs2_rsqa_delete - delete a multi-block reservation and quota allocation
 * @ip: The inode for this reservation
 * @wcount: The inode's write count, or NULL
 *
 */

void gfs2_rsqa_delete(struct gfs2_inode *ip, atomic_t *wcount)
{
	down_write(&ip->i_rw_mutex);
	if ((wcount == NULL) || (atomic_read(wcount) <= 1))
		gfs2_rs_deltree(&ip->i_res);
	up_write(&ip->i_rw_mutex);
	gfs2_qa_delete(ip, wcount);
}
/**
 * return_all_reservations - return all reserved blocks back to the rgrp.
 * @rgd: the rgrp that needs its space back
 *
 * We previously reserved a bunch of blocks for allocation. Now we need to
 * give them back. This leaves the reservation structures intact, but removes
 * all of their corresponding "no-fly zones".
 */
static void return_all_reservations(struct gfs2_rgrpd *rgd)
{
	struct rb_node *n;
	struct gfs2_blkreserv *rs;

	spin_lock(&rgd->rd_rsspin);
	while ((n = rb_first(&rgd->rd_rstree))) {
		rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		__rs_deltree(rs);
	}
	spin_unlock(&rgd->rd_rsspin);
}
void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
{
	struct rb_node *n;
	struct gfs2_rgrpd *rgd;
	struct gfs2_glock *gl;

	while ((n = rb_first(&sdp->sd_rindex_tree))) {
		rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
		gl = rgd->rd_gl;

		rb_erase(n, &sdp->sd_rindex_tree);

		if (gl) {
			glock_clear_object(gl, rgd);
			gfs2_rgrp_brelse(rgd);
			gfs2_glock_put(gl);
		}

		gfs2_free_clones(rgd);
		return_all_reservations(rgd);
		kfree(rgd->rd_bits);
		rgd->rd_bits = NULL;
		kmem_cache_free(gfs2_rgrpd_cachep, rgd);
	}
}
static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
{
	pr_info("ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
	pr_info("ri_length = %u\n", rgd->rd_length);
	pr_info("ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
	pr_info("ri_data = %u\n", rgd->rd_data);
	pr_info("ri_bitbytes = %u\n", rgd->rd_bitbytes);
}
/**
 * compute_bitstructs - Compute the bitmap sizes
 * @rgd: The resource group descriptor
 *
 * Calculates bitmap descriptors, one for each block that contains bitmap data
 *
 * Returns: errno
 */
static int compute_bitstructs(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi;
	u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
	u32 bytes_left, bytes;
	int x;

	if (!length)
		return -EINVAL;

	rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
	if (!rgd->rd_bits)
		return -ENOMEM;

	bytes_left = rgd->rd_bitbytes;

	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;

		/* small rgrp; bitmap stored completely in header block */
		if (length == 1) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
			bi->bi_blocks = bytes * GFS2_NBBY;
		/* header block */
		} else if (x == 0) {
			bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
			bi->bi_blocks = bytes * GFS2_NBBY;
		/* last block */
		} else if (x + 1 == length) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_bitbytes - bytes_left;
			bi->bi_len = bytes;
			bi->bi_blocks = bytes * GFS2_NBBY;
		/* other blocks */
		} else {
			bytes = sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header);
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_bitbytes - bytes_left;
			bi->bi_len = bytes;
			bi->bi_blocks = bytes * GFS2_NBBY;
		}

		bytes_left -= bytes;
	}

	if (bytes_left) {
		gfs2_consist_rgrpd(rgd);
		return -EIO;
	}
	bi = rgd->rd_bits + (length - 1);
	if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) {
		if (gfs2_consist_rgrpd(rgd)) {
			gfs2_rindex_print(rgd);
			fs_err(sdp, "start=%u len=%u offset=%u\n",
			       bi->bi_start, bi->bi_len, bi->bi_offset);
		}
		return -EIO;
	}

	return 0;
}
/**
 * gfs2_ri_total - Total up the file system space, according to the rindex.
 * @sdp: the filesystem
 *
 */
u64 gfs2_ri_total(struct gfs2_sbd *sdp)
{
	u64 total_data = 0;
	struct inode *inode = sdp->sd_rindex;
	struct gfs2_inode *ip = GFS2_I(inode);
	char buf[sizeof(struct gfs2_rindex)];
	int error, rgrps;

	for (rgrps = 0;; rgrps++) {
		loff_t pos = rgrps * sizeof(struct gfs2_rindex);

		if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode))
			break;
		error = gfs2_internal_read(ip, buf, &pos,
					   sizeof(struct gfs2_rindex));
		if (error != sizeof(struct gfs2_rindex))
			break;
		total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
	}
	return total_data;
}
static int rgd_insert(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;

	/* Figure out where to put new node */
	while (*newn) {
		struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
						  rd_node);

		parent = *newn;
		if (rgd->rd_addr < cur->rd_addr)
			newn = &((*newn)->rb_left);
		else if (rgd->rd_addr > cur->rd_addr)
			newn = &((*newn)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&rgd->rd_node, parent, newn);
	rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
	sdp->sd_rgrps++;
	return 0;
}
/**
 * read_rindex_entry - Pull in a new resource index entry from the disk
 * @ip: Pointer to the rindex inode
 *
 * Returns: 0 on success, > 0 on EOF, error code otherwise
 */
static int read_rindex_entry(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
	struct gfs2_rindex buf;
	int error;
	struct gfs2_rgrpd *rgd;

	if (pos >= i_size_read(&ip->i_inode))
		return 1;

	error = gfs2_internal_read(ip, (char *)&buf, &pos,
				   sizeof(struct gfs2_rindex));

	if (error != sizeof(struct gfs2_rindex))
		return (error == 0) ? 1 : error;

	rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
	error = -ENOMEM;
	if (!rgd)
		return error;

	rgd->rd_sbd = sdp;
	rgd->rd_addr = be64_to_cpu(buf.ri_addr);
	rgd->rd_length = be32_to_cpu(buf.ri_length);
	rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
	rgd->rd_data = be32_to_cpu(buf.ri_data);
	rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
	spin_lock_init(&rgd->rd_rsspin);

	error = compute_bitstructs(rgd);
	if (error)
		goto fail;

	error = gfs2_glock_get(sdp, rgd->rd_addr,
			       &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
	if (error)
		goto fail;

	rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
	rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
	if (rgd->rd_data > sdp->sd_max_rg_data)
		sdp->sd_max_rg_data = rgd->rd_data;
	spin_lock(&sdp->sd_rindex_spin);
	error = rgd_insert(rgd);
	spin_unlock(&sdp->sd_rindex_spin);
	if (!error) {
		glock_set_object(rgd->rd_gl, rgd);
		rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
		rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr +
						    rgd->rd_length) * bsize) - 1;
		return 0;
	}

	error = 0; /* someone else read in the rgrp; free it and ignore it */
	gfs2_glock_put(rgd->rd_gl);

fail:
	kfree(rgd->rd_bits);
	rgd->rd_bits = NULL;
	kmem_cache_free(gfs2_rgrpd_cachep, rgd);
	return error;
}
/**
 * set_rgrp_preferences - Run all the rgrps, selecting some we prefer to use
 * @sdp: the GFS2 superblock
 *
 * The purpose of this function is to select a subset of the resource groups
 * and mark them as PREFERRED. We do it in such a way that each node prefers
 * to use a unique set of rgrps to minimize glock contention.
 */
static void set_rgrp_preferences(struct gfs2_sbd *sdp)
{
	struct gfs2_rgrpd *rgd, *first;
	int i;

	/* Skip an initial number of rgrps, based on this node's journal ID.
	   That should start each node out on its own set. */
	rgd = gfs2_rgrpd_get_first(sdp);
	for (i = 0; i < sdp->sd_lockstruct.ls_jid; i++)
		rgd = gfs2_rgrpd_get_next(rgd);
	first = rgd;

	do {
		rgd->rd_flags |= GFS2_RDF_PREFERRED;
		for (i = 0; i < sdp->sd_journals; i++) {
			rgd = gfs2_rgrpd_get_next(rgd);
			if (!rgd || rgd == first)
				break;
		}
	} while (rgd && rgd != first);
}
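/*
 * Example (illustrative): in a four-journal cluster, the node with
 * ls_jid = 2 starts at the third rgrp and then marks every fourth one
 * (rgrps 2, 6, 10, ...) as PREFERRED, so each node gravitates toward a
 * disjoint set and the rgrp glocks rarely bounce between nodes.
 */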
/**
 * gfs2_ri_update - Pull in a new resource index from the disk
 * @ip: pointer to the rindex inode
 *
 * Returns: 0 on successful update, error code otherwise
 */

static int gfs2_ri_update(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	int error;

	do {
		error = read_rindex_entry(ip);
	} while (error == 0);

	if (error < 0)
		return error;

	if (RB_EMPTY_ROOT(&sdp->sd_rindex_tree)) {
		fs_err(sdp, "no resource groups found in the file system.\n");
		return -ENOENT;
	}
	set_rgrp_preferences(sdp);

	sdp->sd_rindex_uptodate = 1;
	return 0;
}
/**
 * gfs2_rindex_update - Update the rindex if required
 * @sdp: The GFS2 superblock
 *
 * We grab a lock on the rindex inode to make sure that it doesn't
 * change whilst we are performing an operation. We keep this lock
 * for quite long periods of time compared to other locks. This
 * doesn't matter, since it is shared and it is very, very rarely
 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
 *
 * This makes sure that we're using the latest copy of the resource index
 * special file, which might have been updated if someone expanded the
 * filesystem (via gfs2_grow utility), which adds new resource groups.
 *
 * Returns: 0 on success, error code otherwise
 */
int gfs2_rindex_update(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
	struct gfs2_glock *gl = ip->i_gl;
	struct gfs2_holder ri_gh;
	int error = 0;
	int unlock_required = 0;

	/* Read new copy from disk if we don't have the latest */
	if (!sdp->sd_rindex_uptodate) {
		if (!gfs2_glock_is_locked_by_me(gl)) {
			error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
			if (error)
				return error;
			unlock_required = 1;
		}
		if (!sdp->sd_rindex_uptodate)
			error = gfs2_ri_update(ip);
		if (unlock_required)
			gfs2_glock_dq_uninit(&ri_gh);
	}

	return error;
}
static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
{
	const struct gfs2_rgrp *str = buf;
	u32 rg_flags;

	rg_flags = be32_to_cpu(str->rg_flags);
	rg_flags &= ~GFS2_RDF_MASK;
	rgd->rd_flags &= GFS2_RDF_MASK;
	rgd->rd_flags |= rg_flags;
	rgd->rd_free = be32_to_cpu(str->rg_free);
	rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
	rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
	/* rd_data0, rd_data and rd_bitbytes already set from rindex */
}
static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
{
	const struct gfs2_rgrp *str = buf;

	rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
	rgl->rl_flags = str->rg_flags;
	rgl->rl_free = str->rg_free;
	rgl->rl_dinodes = str->rg_dinodes;
	rgl->rl_igeneration = str->rg_igeneration;
	rgl->__pad = 0UL;
}
static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
{
	struct gfs2_rgrpd *next = gfs2_rgrpd_get_next(rgd);
	struct gfs2_rgrp *str = buf;
	u32 crc;

	str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
	str->rg_free = cpu_to_be32(rgd->rd_free);
	str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
	if (next == NULL)
		str->rg_skip = 0;
	else if (next->rd_addr > rgd->rd_addr)
		str->rg_skip = cpu_to_be32(next->rd_addr - rgd->rd_addr);
	str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
	str->rg_data0 = cpu_to_be64(rgd->rd_data0);
	str->rg_data = cpu_to_be32(rgd->rd_data);
	str->rg_bitbytes = cpu_to_be32(rgd->rd_bitbytes);
	str->rg_crc = 0;
	crc = gfs2_disk_hash(buf, sizeof(struct gfs2_rgrp));
	str->rg_crc = cpu_to_be32(crc);

	memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, buf);
}
static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
{
	struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
	struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;

	if (rgl->rl_flags != str->rg_flags || rgl->rl_free != str->rg_free ||
	    rgl->rl_dinodes != str->rg_dinodes ||
	    rgl->rl_igeneration != str->rg_igeneration)
		return 0;
	return 1;
}
static u32 count_unlinked(struct gfs2_rgrpd *rgd)
{
	struct gfs2_bitmap *bi;
	const u32 length = rgd->rd_length;
	const u8 *buffer = NULL;
	u32 i, goal, count = 0;

	for (i = 0, bi = rgd->rd_bits; i < length; i++, bi++) {
		goal = 0;
		buffer = bi->bi_bh->b_data + bi->bi_offset;
		WARN_ON(!buffer_uptodate(bi->bi_bh));
		while (goal < bi->bi_len * GFS2_NBBY) {
			goal = gfs2_bitfit(buffer, bi->bi_len, goal,
					   GFS2_BLKST_UNLINKED);
			if (goal == BFITNOENT)
				break;
			count++;
			goal++;
		}
	}

	return count;
}
/**
 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
 * @rgd: the struct gfs2_rgrpd describing the RG to read in
 *
 * Read in all of a Resource Group's header and bitmap blocks.
 * Caller must eventually call gfs2_rgrp_brelse() to free the bitmaps.
 *
 * Returns: errno
 */
static int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl = rgd->rd_gl;
	unsigned int length = rgd->rd_length;
	struct gfs2_bitmap *bi;
	unsigned int x, y;
	int error;

	if (rgd->rd_bits[0].bi_bh != NULL)
		return 0;

	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;
		error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, 0, &bi->bi_bh);
		if (error)
			goto fail;
	}

	for (y = length; y--;) {
		bi = rgd->rd_bits + y;
		error = gfs2_meta_wait(sdp, bi->bi_bh);
		if (error)
			goto fail;
		if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
					      GFS2_METATYPE_RG)) {
			error = -EIO;
			goto fail;
		}
	}

	if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
		for (x = 0; x < length; x++)
			clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
		gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
		rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
		rgd->rd_free_clone = rgd->rd_free;
		/* max out the rgrp allocation failure point */
		rgd->rd_extfail_pt = rgd->rd_free;
	}
	if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
		rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
		gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
				     rgd->rd_bits[0].bi_bh->b_data);
	}
	else if (sdp->sd_args.ar_rgrplvb) {
		if (!gfs2_rgrp_lvb_valid(rgd)) {
			gfs2_consist_rgrpd(rgd);
			error = -EIO;
			goto fail;
		}
		if (rgd->rd_rgl->rl_unlinked == 0)
			rgd->rd_flags &= ~GFS2_RDF_CHECK;
	}
	return 0;

fail:
	while (x--) {
		bi = rgd->rd_bits + x;
		brelse(bi->bi_bh);
		bi->bi_bh = NULL;
		gfs2_assert_warn(sdp, !bi->bi_clone);
	}

	return error;
}
static int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
{
	u32 rl_flags;

	if (rgd->rd_flags & GFS2_RDF_UPTODATE)
		return 0;

	if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
		return gfs2_rgrp_bh_get(rgd);

	rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
	rl_flags &= ~GFS2_RDF_MASK;
	rgd->rd_flags &= GFS2_RDF_MASK;
	rgd->rd_flags |= (rl_flags | GFS2_RDF_CHECK);
	if (rgd->rd_rgl->rl_unlinked == 0)
		rgd->rd_flags &= ~GFS2_RDF_CHECK;
	rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
	rgd->rd_free_clone = rgd->rd_free;
	rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
	rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
	return 0;
}
int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
	struct gfs2_sbd *sdp = rgd->rd_sbd;

	if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb)
		return 0;
	return gfs2_rgrp_bh_get(rgd);
}
/**
 * gfs2_rgrp_brelse - Release RG bitmaps read in with gfs2_rgrp_bh_get()
 * @rgd: The resource group
 *
 */

void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd)
{
	int x, length = rgd->rd_length;

	for (x = 0; x < length; x++) {
		struct gfs2_bitmap *bi = rgd->rd_bits + x;
		if (bi->bi_bh) {
			brelse(bi->bi_bh);
			bi->bi_bh = NULL;
		}
	}
}
/**
 * gfs2_rgrp_go_unlock - Unlock a rgrp glock
 * @gh: The glock holder for the resource group
 *
 */

void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
{
	struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
	int demote_requested = test_bit(GLF_DEMOTE, &gh->gh_gl->gl_flags) |
		test_bit(GLF_PENDING_DEMOTE, &gh->gh_gl->gl_flags);

	if (rgd && demote_requested)
		gfs2_rgrp_brelse(rgd);
}
int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
			    struct buffer_head *bh,
			    const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)
{
	struct super_block *sb = sdp->sd_vfs;
	u64 blk;
	sector_t start = 0;
	sector_t nr_blks = 0;
	int rv;
	unsigned int x;
	u32 trimmed = 0;
	u8 diff;

	for (x = 0; x < bi->bi_len; x++) {
		const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
		clone += bi->bi_offset;
		clone += x;
		if (bh) {
			const u8 *orig = bh->b_data + bi->bi_offset + x;
			diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1));
		} else {
			diff = ~(*clone | (*clone >> 1));
		}
		diff &= 0x55;
		if (diff == 0)
			continue;
		blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
		while (diff) {
			if (diff & 1) {
				if (nr_blks == 0)
					goto start_new_extent;
				if ((start + nr_blks) != blk) {
					if (nr_blks >= minlen) {
						rv = sb_issue_discard(sb,
							start, nr_blks,
							GFP_NOFS, 0);
						if (rv)
							goto fail;
						trimmed += nr_blks;
					}
					nr_blks = 0;
start_new_extent:
					start = blk;
				}
				nr_blks++;
			}
			diff >>= 2;
			blk++;
		}
	}
	if (nr_blks >= minlen) {
		rv = sb_issue_discard(sb, start, nr_blks, GFP_NOFS, 0);
		if (rv)
			goto fail;
		trimmed += nr_blks;
	}
	if (ptrimmed)
		*ptrimmed = trimmed;
	return 0;

fail:
	if (sdp->sd_args.ar_discard)
		fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem\n", rv);
	sdp->sd_args.ar_discard = 0;
	return -EIO;
}
/**
 * gfs2_fitrim - Generate discard requests for unused bits of the filesystem
 * @filp: Any file on the filesystem
 * @argp: Pointer to the arguments (also used to pass result)
 *
 * Returns: 0 on success, otherwise error code
 */
int gfs2_fitrim(struct file *filp, void __user *argp)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
	struct buffer_head *bh;
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrpd *rgd_end;
	struct gfs2_holder gh;
	struct fstrim_range r;
	int ret = 0;
	u64 amt;
	u64 trimmed = 0;
	u64 start, end, minlen;
	unsigned int x;
	unsigned bs_shift = sdp->sd_sb.sb_bsize_shift;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
		return -EROFS;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (copy_from_user(&r, argp, sizeof(r)))
		return -EFAULT;

	ret = gfs2_rindex_update(sdp);
	if (ret)
		return ret;

	start = r.start >> bs_shift;
	end = start + (r.len >> bs_shift);
	minlen = max_t(u64, r.minlen,
		       q->limits.discard_granularity) >> bs_shift;

	if (end <= start || minlen > sdp->sd_max_rg_data)
		return -EINVAL;

	rgd = gfs2_blk2rgrpd(sdp, start, 0);
	rgd_end = gfs2_blk2rgrpd(sdp, end, 0);

	if ((gfs2_rgrpd_get_first(sdp) == gfs2_rgrpd_get_next(rgd_end))
	    && (start > rgd_end->rd_data0 + rgd_end->rd_data))
		return -EINVAL; /* start is beyond the end of the fs */

	while (1) {

		ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
		if (ret)
			goto out;

		if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) {
			/* Trim each bitmap in the rgrp */
			for (x = 0; x < rgd->rd_length; x++) {
				struct gfs2_bitmap *bi = rgd->rd_bits + x;
				ret = gfs2_rgrp_send_discards(sdp,
						rgd->rd_data0, NULL, bi, minlen,
						&amt);
				if (ret) {
					gfs2_glock_dq_uninit(&gh);
					goto out;
				}
				trimmed += amt;
			}

			/* Mark rgrp as having been trimmed */
			ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
			if (ret == 0) {
				bh = rgd->rd_bits[0].bi_bh;
				rgd->rd_flags |= GFS2_RGF_TRIMMED;
				gfs2_trans_add_meta(rgd->rd_gl, bh);
				gfs2_rgrp_out(rgd, bh->b_data);
				gfs2_trans_end(sdp);
			}
		}
		gfs2_glock_dq_uninit(&gh);

		if (rgd == rgd_end)
			break;

		rgd = gfs2_rgrpd_get_next(rgd);
	}

out:
	r.len = trimmed << bs_shift;
	if (copy_to_user(argp, &r, sizeof(r)))
		return -EFAULT;

	return ret;
}
/**
 * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree
 * @ip: the inode structure
 *
 */
static void rs_insert(struct gfs2_inode *ip)
{
	struct rb_node **newn, *parent = NULL;
	int rc;
	struct gfs2_blkreserv *rs = &ip->i_res;
	struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd;
	u64 fsblock = gfs2_rbm_to_block(&rs->rs_rbm);

	BUG_ON(gfs2_rs_active(rs));

	spin_lock(&rgd->rd_rsspin);
	newn = &rgd->rd_rstree.rb_node;
	while (*newn) {
		struct gfs2_blkreserv *cur =
			rb_entry(*newn, struct gfs2_blkreserv, rs_node);

		parent = *newn;
		rc = rs_cmp(fsblock, rs->rs_free, cur);
		if (rc > 0)
			newn = &((*newn)->rb_right);
		else if (rc < 0)
			newn = &((*newn)->rb_left);
		else {
			spin_unlock(&rgd->rd_rsspin);
			WARN_ON(1);
			return;
		}
	}

	rb_link_node(&rs->rs_node, parent, newn);
	rb_insert_color(&rs->rs_node, &rgd->rd_rstree);

	/* Do our rgrp accounting for the reservation */
	rgd->rd_reserved += rs->rs_free; /* blocks reserved */
	spin_unlock(&rgd->rd_rsspin);
	trace_gfs2_rs(rs, TRACE_RS_INSERT);
}
/**
 * rgd_free - return the number of free blocks we can allocate.
 * @rgd: the resource group
 *
 * This function returns the number of free blocks for an rgrp.
 * That's the clone-free blocks (blocks that are free, not including those
 * still being used for unlinked files that haven't been deleted.)
 *
 * It also subtracts any blocks reserved by someone else, but does not
 * include free blocks that are still part of our current reservation,
 * because obviously we can (and will) allocate them.
 */
static inline u32 rgd_free(struct gfs2_rgrpd *rgd, struct gfs2_blkreserv *rs)
{
	u32 tot_reserved, tot_free;

	if (WARN_ON_ONCE(rgd->rd_reserved < rs->rs_free))
		return 0;
	tot_reserved = rgd->rd_reserved - rs->rs_free;

	if (rgd->rd_free_clone < tot_reserved)
		tot_reserved = 0;

	tot_free = rgd->rd_free_clone - tot_reserved;

	return tot_free;
}
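/*
 * Example (illustrative numbers): with rd_free_clone = 500, rd_reserved =
 * 120 and our own rs_free = 20, other reservations account for 100
 * blocks, so rgd_free() returns 500 - 100 = 400; our own 20 reserved
 * blocks remain allocatable to us.
 */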
/**
 * rg_mblk_search - find a group of multiple free blocks to form a reservation
 * @rgd: the resource group descriptor
 * @ip: pointer to the inode for which we're reserving blocks
 * @ap: the allocation parameters
 *
 */
static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
			   const struct gfs2_alloc_parms *ap)
{
	struct gfs2_rbm rbm = { .rgd = rgd, };
	u64 goal;
	struct gfs2_blkreserv *rs = &ip->i_res;
	u32 extlen;
	u32 free_blocks = rgd_free(rgd, rs);
	int ret;
	struct inode *inode = &ip->i_inode;

	if (S_ISDIR(inode->i_mode))
		extlen = 1;
	else {
		extlen = max_t(u32, atomic_read(&rs->rs_sizehint), ap->target);
		extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
	}
	if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
		return;

	/* Find bitmap block that contains bits for goal block */
	if (rgrp_contains_block(rgd, ip->i_goal))
		goal = ip->i_goal;
	else
		goal = rgd->rd_last_alloc + rgd->rd_data0;

	if (WARN_ON(gfs2_rbm_from_block(&rbm, goal)))
		return;

	ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &extlen, ip, true);
	if (ret == 0) {
		rs->rs_rbm = rbm;
		rs->rs_free = extlen;
		rs_insert(ip);
	} else {
		if (goal == rgd->rd_last_alloc + rgd->rd_data0)
			rgd->rd_last_alloc = 0;
	}
}
/**
 * gfs2_next_unreserved_block - Return next block that is not reserved
 * @rgd: The resource group
 * @block: The starting block
 * @length: The required length
 * @ip: Ignore any reservations for this inode
 *
 * If the block does not appear in any reservation, then return the
 * block number unchanged. If it does appear in the reservation, then
 * keep looking through the tree of reservations in order to find the
 * first block number which is not reserved.
 */
static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
				      u32 length,
				      const struct gfs2_inode *ip)
{
	struct gfs2_blkreserv *rs;
	struct rb_node *n;
	int rc;

	spin_lock(&rgd->rd_rsspin);
	n = rgd->rd_rstree.rb_node;
	while (n) {
		rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		rc = rs_cmp(block, length, rs);
		if (rc < 0)
			n = n->rb_left;
		else if (rc > 0)
			n = n->rb_right;
		else
			break;
	}

	if (n) {
		while ((rs_cmp(block, length, rs) == 0) && (&ip->i_res != rs)) {
			block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free;
			n = n->rb_right;
			if (n == NULL)
				break;
			rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		}
	}

	spin_unlock(&rgd->rd_rsspin);
	return block;
}
/**
 * gfs2_reservation_check_and_update - Check for reservations during block alloc
 * @rbm: The current position in the resource group
 * @ip: The inode for which we are searching for blocks
 * @minext: The minimum extent length
 * @maxext: A pointer to the maximum extent structure
 *
 * This checks the current position in the rgrp to see whether there is
 * a reservation covering this block. If not then this function is a
 * no-op. If there is, then the position is moved to the end of the
 * contiguous reservation(s) so that we are pointing at the first
 * non-reserved block.
 *
 * Returns: 0 if no reservation, 1 if @rbm has changed, otherwise an error
 */
static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
					     const struct gfs2_inode *ip,
					     u32 minext,
					     struct gfs2_extent *maxext)
{
	u64 block = gfs2_rbm_to_block(rbm);
	u32 extlen = 1;
	u64 nblock;
	int ret;

	/*
	 * If we have a minimum extent length, then skip over any extent
	 * which is less than the min extent length in size.
	 */
	if (minext) {
		extlen = gfs2_free_extlen(rbm, minext);
		if (extlen <= maxext->len)
			goto fail;
	}

	/*
	 * Check the extent which has been found against the reservations
	 * and skip if parts of it are already reserved
	 */
	nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip);
	if (nblock == block) {
		if (!minext || extlen >= minext)
			return 0;

		if (extlen > maxext->len) {
			maxext->len = extlen;
			maxext->rbm = *rbm;
		}
fail:
		nblock = block + extlen;
	}
	ret = gfs2_rbm_from_block(rbm, nblock);
	if (ret < 0)
		return ret;
	return 1;
}
/**
 * gfs2_rbm_find - Look for blocks of a particular state
 * @rbm: Value/result starting position and final position
 * @state: The state which we want to find
 * @minext: Pointer to the requested extent length (NULL for a single block)
 *          This is updated to be the actual reservation size.
 * @ip: If set, check for reservations
 * @nowrap: Stop looking at the end of the rgrp, rather than wrapping
 *          around until we've reached the starting point.
 *
 * Side effects:
 * - If looking for free blocks, we set GBF_FULL on each bitmap which
 *   has no free blocks in it.
 * - If looking for free blocks, we set rd_extfail_pt on each rgrp which
 *   has come up short on a free block search.
 *
 * Returns: 0 on success, -ENOSPC if there is no block of the requested state
 */
static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
			 const struct gfs2_inode *ip, bool nowrap)
{
	struct buffer_head *bh;
	int initial_bii;
	u32 initial_offset;
	int first_bii = rbm->bii;
	u32 first_offset = rbm->offset;
	u32 offset;
	u8 *buffer;
	int n = 0;
	int iters = rbm->rgd->rd_length;
	int ret;
	struct gfs2_bitmap *bi;
	struct gfs2_extent maxext = { .rbm.rgd = rbm->rgd, };

	/* If we are not starting at the beginning of a bitmap, then we
	 * need to add one to the bitmap count to ensure that we search
	 * the starting bitmap twice.
	 */
	if (rbm->offset != 0)
		iters++;

	while (1) {
		bi = rbm_bi(rbm);
		if ((ip == NULL || !gfs2_rs_active(&ip->i_res)) &&
		    test_bit(GBF_FULL, &bi->bi_flags) &&
		    (state == GFS2_BLKST_FREE))
			goto next_bitmap;

		bh = bi->bi_bh;
		buffer = bh->b_data + bi->bi_offset;
		WARN_ON(!buffer_uptodate(bh));
		if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
			buffer = bi->bi_clone + bi->bi_offset;
		initial_offset = rbm->offset;
		offset = gfs2_bitfit(buffer, bi->bi_len, rbm->offset, state);
		if (offset == BFITNOENT)
			goto bitmap_full;
		rbm->offset = offset;
		if (ip == NULL)
			return 0;

		initial_bii = rbm->bii;
		ret = gfs2_reservation_check_and_update(rbm, ip,
							minext ? *minext : 0,
							&maxext);
		if (ret == 0)
			return 0;
		if (ret > 0) {
			n += (rbm->bii - initial_bii);
			goto next_iter;
		}
		if (ret == -E2BIG) {
			rbm->bii = 0;
			rbm->offset = 0;
			n += (rbm->bii - initial_bii);
			goto res_covered_end_of_rgrp;
		}
		return ret;

bitmap_full:	/* Mark bitmap as full and fall through */
		if ((state == GFS2_BLKST_FREE) && initial_offset == 0)
			set_bit(GBF_FULL, &bi->bi_flags);

next_bitmap:	/* Find next bitmap in the rgrp */
		rbm->offset = 0;
		rbm->bii++;
		if (rbm->bii == rbm->rgd->rd_length)
			rbm->bii = 0;
res_covered_end_of_rgrp:
		if ((rbm->bii == 0) && nowrap)
			break;
		n++;
next_iter:
		if (n >= iters)
			break;
	}

	if (minext == NULL || state != GFS2_BLKST_FREE)
		return -ENOSPC;

	/* If the extent was too small, and it's smaller than the smallest
	   to have failed before, remember for future reference that it's
	   useless to search this rgrp again for this amount or more. */
	if ((first_offset == 0) && (first_bii == 0) &&
	    (*minext < rbm->rgd->rd_extfail_pt))
		rbm->rgd->rd_extfail_pt = *minext;

	/* If the maximum extent we found is big enough to fulfill the
	   minimum requirements, use it anyway. */
	if (maxext.len) {
		*rbm = maxext.rbm;
		*minext = maxext.len;
		return 0;
	}

	return -ENOSPC;
}
/**
 * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
 * @rgd: The rgrp
 * @last_unlinked: block address of the last dinode we unlinked
 * @skip: block address we should explicitly not unlink
 *
 * Returns: 0 if no error
 *          The inode, if one has been found, in inode.
 */
static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
{
	u64 block;
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl;
	struct gfs2_inode *ip;
	int error;
	int found = 0;
	struct gfs2_rbm rbm = { .rgd = rgd, .bii = 0, .offset = 0 };

	while (1) {
		down_write(&sdp->sd_log_flush_lock);
		error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, NULL, NULL,
				      true);
		up_write(&sdp->sd_log_flush_lock);
		if (error == -ENOSPC)
			break;
		if (WARN_ON_ONCE(error))
			break;

		block = gfs2_rbm_to_block(&rbm);
		if (gfs2_rbm_from_block(&rbm, block + 1))
			break;
		if (*last_unlinked != NO_BLOCK && block <= *last_unlinked)
			continue;
		if (block == skip)
			continue;
		*last_unlinked = block;

		error = gfs2_glock_get(sdp, block, &gfs2_iopen_glops, CREATE, &gl);
		if (error)
			continue;

		/* If the inode is already in cache, we can ignore it here
		 * because the existing inode disposal code will deal with
		 * it when all refs have gone away. Accessing gl_object like
		 * this is not safe in general. Here it is ok because we do
		 * not dereference the pointer, and we only need an approx
		 * answer to whether it is NULL or not.
		 */
		ip = gl->gl_object;

		if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gfs2_glock_put(gl);
		else
			found++;

		/* Limit reclaim to sensible number of tasks */
		if (found > NR_CPUS)
			break;
	}

	rgd->rd_flags &= ~GFS2_RDF_CHECK;
	return;
}
/**
 * gfs2_rgrp_congested - Use stats to figure out whether an rgrp is congested
 * @rgd: The rgrp in question
 * @loops: An indication of how picky we can be (0=very, 1=less so)
 *
 * This function uses the recently added glock statistics in order to
 * figure out whether a particular resource group is suffering from
 * contention from multiple nodes. This is done purely on the basis
 * of timings, since this is the only data we have to work with and
 * our aim here is to reject a resource group which is highly contended
 * but (very important) not to do this too often in order to ensure that
 * we do not land up introducing fragmentation by changing resource
 * groups when not actually required.
 *
 * The calculation is fairly simple, we want to know whether the SRTTB
 * (i.e. smoothed round trip time for blocking operations) to acquire
 * the lock for this rgrp's glock is significantly greater than the
 * time taken for resource groups on average. We introduce a margin in
 * the form of the variable @var which is computed as the sum of the two
 * respective variances, and multiplied by a factor depending on @loops
 * and whether we have a lot of data to base the decision on. This is
 * then tested against the square difference of the means in order to
 * decide whether the result is statistically significant or not.
 *
 * Returns: A boolean verdict on the congestion status
 */
static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops)
{
	const struct gfs2_glock *gl = rgd->rd_gl;
	const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_lkstats *st;
	u64 r_dcount, l_dcount;
	u64 l_srttb, a_srttb = 0;
	s64 srttb_diff;
	u64 sqr_diff;
	u64 var;
	int cpu, nonzero = 0;

	preempt_disable();
	for_each_present_cpu(cpu) {
		st = &per_cpu_ptr(sdp->sd_lkstats, cpu)->lkstats[LM_TYPE_RGRP];
		if (st->stats[GFS2_LKS_SRTTB]) {
			a_srttb += st->stats[GFS2_LKS_SRTTB];
			nonzero++;
		}
	}
	st = &this_cpu_ptr(sdp->sd_lkstats)->lkstats[LM_TYPE_RGRP];
	if (nonzero)
		do_div(a_srttb, nonzero);
	r_dcount = st->stats[GFS2_LKS_DCOUNT];
	var = st->stats[GFS2_LKS_SRTTVARB] +
	      gl->gl_stats.stats[GFS2_LKS_SRTTVARB];
	preempt_enable();

	l_srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB];
	l_dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT];

	if ((l_dcount < 1) || (r_dcount < 1) || (a_srttb == 0))
		return false;

	srttb_diff = a_srttb - l_srttb;
	sqr_diff = srttb_diff * srttb_diff;

	var *= 2;
	if (l_dcount < 8 || r_dcount < 8)
		var *= 2;
	if (loops == 1)
		var *= 2;

	return ((srttb_diff < 0) && (sqr_diff > var));
}
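/*
 * Example (illustrative numbers): with a cluster-average SRTTB of 100us
 * and l_srttb = 180us for this rgrp's glock, srttb_diff = -80 and
 * sqr_diff = 6400; if var (the summed variances after the @loops
 * scaling) is 4000, the rgrp is reported congested. A local time faster
 * than the average never is, since srttb_diff < 0 is required.
 */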
/**
 * gfs2_rgrp_used_recently
 * @rs: The block reservation with the rgrp to test
 * @msecs: The time limit in milliseconds
 *
 * Returns: True if the rgrp glock has been used within the time limit
 */
static bool gfs2_rgrp_used_recently(const struct gfs2_blkreserv *rs,
				    u64 msecs)
{
	u64 tdiff;

	tdiff = ktime_to_ns(ktime_sub(ktime_get_real(),
                            rs->rs_rbm.rgd->rd_gl->gl_dstamp));

	return tdiff > (msecs * 1000 * 1000);
}
static u32 gfs2_orlov_skip(const struct gfs2_inode *ip)
{
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	u32 skip;

	get_random_bytes(&skip, sizeof(skip));
	return skip % sdp->sd_rgrps;
}
static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin)
{
	struct gfs2_rgrpd *rgd = *pos;
	struct gfs2_sbd *sdp = rgd->rd_sbd;

	rgd = gfs2_rgrpd_get_next(rgd);
	if (rgd == NULL)
		rgd = gfs2_rgrpd_get_first(sdp);
	*pos = rgd;
	if (rgd != begin) /* If we didn't wrap */
		return true;
	return false;
}
/**
 * fast_to_acquire - determine if a resource group will be fast to acquire
 *
 * If this is one of our preferred rgrps, it should be quicker to acquire,
 * because we tried to set ourselves up as dlm lock master.
 */
static inline int fast_to_acquire(struct gfs2_rgrpd *rgd)
{
	struct gfs2_glock *gl = rgd->rd_gl;

	if (gl->gl_state != LM_ST_UNLOCKED && list_empty(&gl->gl_holders) &&
	    !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    !test_bit(GLF_DEMOTE, &gl->gl_flags))
		return 1;
	if (rgd->rd_flags & GFS2_RDF_PREFERRED)
		return 1;
	return 0;
}
/**
 * gfs2_inplace_reserve - Reserve space in the filesystem
 * @ip: the inode to reserve space for
 * @ap: the allocation parameters
 *
 * We try our best to find an rgrp that has at least ap->target blocks
 * available. After a couple of passes (loops == 2), the prospects of finding
 * such an rgrp diminish. At this stage, we return the first rgrp that has
 * at least ap->min_target blocks available. Either way, we set ap->allowed to
 * the number of blocks available in the chosen rgrp.
 *
 * Returns: 0 on success,
 *          -ENOMEM if a suitable rgrp can't be found
 *          errno otherwise
 */
int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *begin = NULL;
	struct gfs2_blkreserv *rs = &ip->i_res;
	int error = 0, rg_locked, flags = 0;
	u64 last_unlinked = NO_BLOCK;
	int loops = 0;
	u32 free_blocks, skip = 0;

	if (sdp->sd_args.ar_rgrplvb)
		flags |= GL_SKIP;
	if (gfs2_assert_warn(sdp, ap->target))
		return -EINVAL;
	if (gfs2_rs_active(rs)) {
		begin = rs->rs_rbm.rgd;
	} else if (rs->rs_rbm.rgd &&
		   rgrp_contains_block(rs->rs_rbm.rgd, ip->i_goal)) {
		begin = rs->rs_rbm.rgd;
	} else {
		check_and_update_goal(ip);
		rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
	}
	if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV))
		skip = gfs2_orlov_skip(ip);
	if (rs->rs_rbm.rgd == NULL)
		return -EBADSLT;

	while (loops < 3) {
		rg_locked = 1;

		if (!gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) {
			rg_locked = 0;
			if (skip && skip--)
				goto next_rgrp;
			if (!gfs2_rs_active(rs)) {
				if (loops == 0 &&
				    !fast_to_acquire(rs->rs_rbm.rgd))
					goto next_rgrp;
				if ((loops < 2) &&
				    gfs2_rgrp_used_recently(rs, 1000) &&
				    gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
					goto next_rgrp;
			}
			error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
						   LM_ST_EXCLUSIVE, flags,
						   &rs->rs_rgd_gh);
			if (unlikely(error))
				return error;
			if (!gfs2_rs_active(rs) && (loops < 2) &&
			    gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
				goto skip_rgrp;
			if (sdp->sd_args.ar_rgrplvb) {
				error = update_rgrp_lvb(rs->rs_rbm.rgd);
				if (unlikely(error)) {
					gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
					return error;
				}
			}
		}

		/* Skip unusable resource groups */
		if ((rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC |
						 GFS2_RDF_ERROR)) ||
		    (loops == 0 && ap->target > rs->rs_rbm.rgd->rd_extfail_pt))
			goto skip_rgrp;

		if (sdp->sd_args.ar_rgrplvb)
			gfs2_rgrp_bh_get(rs->rs_rbm.rgd);

		/* Get a reservation if we don't already have one */
		if (!gfs2_rs_active(rs))
			rg_mblk_search(rs->rs_rbm.rgd, ip, ap);

		/* Skip rgrps when we can't get a reservation on first pass */
		if (!gfs2_rs_active(rs) && (loops < 1))
			goto check_rgrp;

		/* If rgrp has enough free space, use it */
		free_blocks = rgd_free(rs->rs_rbm.rgd, rs);
		if (free_blocks >= ap->target ||
		    (loops == 2 && ap->min_target &&
		     free_blocks >= ap->min_target)) {
			ap->allowed = free_blocks;
			return 0;
		}
check_rgrp:
		/* Check for unlinked inodes which can be reclaimed */
		if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK)
			try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked,
					ip->i_no_addr);
skip_rgrp:
		/* Drop reservation, if we couldn't use reserved rgrp */
		if (gfs2_rs_active(rs))
			gfs2_rs_deltree(rs);

		/* Unlock rgrp if required */
		if (!rg_locked)
			gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
next_rgrp:
		/* Find the next rgrp, and continue looking */
		if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin))
			continue;
		if (skip)
			continue;

		/* If we've scanned all the rgrps, but found no free blocks
		 * then this checks for some less likely conditions before
		 * trying again.
		 */
		loops++;
		/* Check that fs hasn't grown if writing to rindex */
		if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) {
			error = gfs2_ri_update(ip);
			if (error)
				return error;
		}
		/* Flushing the log may release space */
		if (loops == 2)
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_INPLACE_RESERVE);
	}

	return -ENOSPC;
}
/**
 * gfs2_inplace_release - release an inplace reservation
 * @ip: the inode the reservation was taken out on
 *
 * Release a reservation made by gfs2_inplace_reserve().
 */

void gfs2_inplace_release(struct gfs2_inode *ip)
{
	struct gfs2_blkreserv *rs = &ip->i_res;

	if (gfs2_holder_initialized(&rs->rs_rgd_gh))
		gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
}
/**
 * gfs2_alloc_extent - allocate an extent from a given bitmap
 * @rbm: the resource group information
 * @dinode: TRUE if the first block we allocate is for a dinode
 * @n: The extent length (value/result)
 *
 * Add the bitmap buffer to the transaction.
 * Set the found bits to @new_state to change block's allocation state.
 */
static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
			      unsigned int *n)
{
	struct gfs2_rbm pos = { .rgd = rbm->rgd, };
	const unsigned int elen = *n;
	u64 block;
	int ret;

	*n = 1;
	block = gfs2_rbm_to_block(rbm);
	gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm_bi(rbm)->bi_bh);
	gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
	block++;
	while (*n < elen) {
		ret = gfs2_rbm_from_block(&pos, block);
		if (ret || gfs2_testbit(&pos, true) != GFS2_BLKST_FREE)
			break;
		gfs2_trans_add_meta(pos.rgd->rd_gl, rbm_bi(&pos)->bi_bh);
		gfs2_setbit(&pos, true, GFS2_BLKST_USED);
		(*n)++;
		block++;
	}
}
/**
 * rgblk_free - Change alloc state of given block(s)
 * @sdp: the filesystem
 * @bstart: the start of a run of blocks to free
 * @blen: the length of the block run (all must lie within ONE RG!)
 * @new_state: GFS2_BLKST_XXX the after-allocation block state
 *
 * Returns: Resource group containing the block(s)
 */
static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
				     u32 blen, unsigned char new_state)
{
	struct gfs2_rbm rbm;
	struct gfs2_bitmap *bi, *bi_prev = NULL;

	rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
	if (!rbm.rgd) {
		if (gfs2_consist(sdp))
			fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
		return NULL;
	}

	gfs2_rbm_from_block(&rbm, bstart);
	while (blen--) {
		bi = rbm_bi(&rbm);
		if (bi != bi_prev) {
			if (!bi->bi_clone) {
				bi->bi_clone = kmalloc(bi->bi_bh->b_size,
						       GFP_NOFS | __GFP_NOFAIL);
				memcpy(bi->bi_clone + bi->bi_offset,
				       bi->bi_bh->b_data + bi->bi_offset,
				       bi->bi_len);
			}
			gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
			bi_prev = bi;
		}
		gfs2_setbit(&rbm, false, new_state);
		gfs2_rbm_incr(&rbm);
	}

	return rbm.rgd;
}
/**
 * gfs2_rgrp_dump - print out an rgrp
 * @seq: The iterator
 * @gl: The glock in question
 *
 */

void gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;
	struct gfs2_blkreserv *trs;
	const struct rb_node *n;

	if (rgd == NULL)
		return;
	gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u r:%u e:%u\n",
		       (unsigned long long)rgd->rd_addr, rgd->rd_flags,
		       rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
		       rgd->rd_reserved, rgd->rd_extfail_pt);
	spin_lock(&rgd->rd_rsspin);
	for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
		trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		dump_rs(seq, trs);
	}
	spin_unlock(&rgd->rd_rsspin);
}
static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;

	fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n",
		(unsigned long long)rgd->rd_addr);
	fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n");
	gfs2_rgrp_dump(NULL, rgd->rd_gl);
	rgd->rd_flags |= GFS2_RDF_ERROR;
}
/**
 * gfs2_adjust_reservation - Adjust (or remove) a reservation after allocation
 * @ip: The inode we have just allocated blocks for
 * @rbm: The start of the allocated blocks
 * @len: The extent length
 *
 * Adjusts a reservation after an allocation has taken place. If the
 * reservation does not match the allocation, or if it is now empty
 * then it is removed.
 */
static void gfs2_adjust_reservation(struct gfs2_inode *ip,
				    const struct gfs2_rbm *rbm, unsigned len)
{
	struct gfs2_blkreserv *rs = &ip->i_res;
	struct gfs2_rgrpd *rgd = rbm->rgd;
	unsigned rlen;
	u64 block;
	int ret;

	spin_lock(&rgd->rd_rsspin);
	if (gfs2_rs_active(rs)) {
		if (gfs2_rbm_eq(&rs->rs_rbm, rbm)) {
			block = gfs2_rbm_to_block(rbm);
			ret = gfs2_rbm_from_block(&rs->rs_rbm, block + len);
			rlen = min(rs->rs_free, len);
			rs->rs_free -= rlen;
			rgd->rd_reserved -= rlen;
			trace_gfs2_rs(rs, TRACE_RS_CLAIM);
			if (rs->rs_free && !ret)
				goto out;
			/* We used up our block reservation, so we should
			   reserve more blocks next time. */
			atomic_add(RGRP_RSRV_ADDBLKS, &rs->rs_sizehint);
		}
		__rs_deltree(rs);
	}
out:
	spin_unlock(&rgd->rd_rsspin);
}
/**
 * gfs2_set_alloc_start - Set starting point for block allocation
 * @rbm: The rbm which will be set to the required location
 * @ip: The gfs2 inode
 * @dinode: Flag to say if allocation includes a new inode
 *
 * This sets the starting point from the reservation if one is active;
 * otherwise it falls back to guessing a start point based on the
 * inode's goal block or the last allocation point in the rgrp.
 */
static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
				 const struct gfs2_inode *ip, bool dinode)
{
	u64 goal;

	if (gfs2_rs_active(&ip->i_res)) {
		*rbm = ip->i_res.rs_rbm;
		return;
	}

	if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal))
		goal = ip->i_goal;
	else
		goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;

	gfs2_rbm_from_block(rbm, goal);
}
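
/*
 * Editor's note: a worked example with assumed values.  With no active
 * reservation and no usable i_goal, and with rd_data0 = 65536 (first data
 * block of the rgrp) and rd_last_alloc = 100 (rgrp-relative), the search
 * starts at filesystem block:
 *
 *	goal = 100 + 65536 = 65636
 *	gfs2_rbm_from_block(rbm, goal)	(convert to a bitmap position)
 */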
/**
 * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
 * @ip: the inode to allocate the block for
 * @bn: Used to return the starting block number
 * @nblocks: requested number of blocks/extent length (value/result)
 * @dinode: 1 if we're allocating a dinode block, else 0
 * @generation: the generation number of the inode
 *
 * Returns: 0 or error
 */
int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
		      bool dinode, u64 *generation)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	struct gfs2_rbm rbm = { .rgd = ip->i_res.rs_rbm.rgd, };
	unsigned int ndata;
	u64 block; /* block, within the file system scope */
	int error;

	gfs2_set_alloc_start(&rbm, ip, dinode);
	error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, ip, false);

	if (error == -ENOSPC) {
		gfs2_set_alloc_start(&rbm, ip, dinode);
		error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, NULL, false);
	}

	/* Since all blocks are reserved in advance, this shouldn't happen */
	if (error) {
		fs_warn(sdp, "inum=%llu error=%d, nblocks=%u, full=%d fail_pt=%d\n",
			(unsigned long long)ip->i_no_addr, error, *nblocks,
			test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags),
			rbm.rgd->rd_extfail_pt);
		goto rgrp_error;
	}

	gfs2_alloc_extent(&rbm, dinode, nblocks);
	block = gfs2_rbm_to_block(&rbm);
	rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0;
	if (gfs2_rs_active(&ip->i_res))
		gfs2_adjust_reservation(ip, &rbm, *nblocks);
	ndata = *nblocks;
	if (dinode)
		ndata--;

	if (!dinode) {
		ip->i_goal = block + ndata - 1;
		error = gfs2_meta_inode_buffer(ip, &dibh);
		if (error == 0) {
			struct gfs2_dinode *di =
				(struct gfs2_dinode *)dibh->b_data;
			gfs2_trans_add_meta(ip->i_gl, dibh);
			di->di_goal_meta = di->di_goal_data =
				cpu_to_be64(ip->i_goal);
			brelse(dibh);
		}
	}
	if (rbm.rgd->rd_free < *nblocks) {
		pr_warn("nblocks=%u\n", *nblocks);
		goto rgrp_error;
	}

	rbm.rgd->rd_free -= *nblocks;
	if (dinode) {
		rbm.rgd->rd_dinodes++;
		*generation = rbm.rgd->rd_igeneration++;
		if (*generation == 0)
			*generation = rbm.rgd->rd_igeneration++;
	}

	gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh);
	gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);

	gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
	if (dinode)
		gfs2_trans_add_unrevoke(sdp, block, *nblocks);

	gfs2_quota_change(ip, *nblocks, ip->i_inode.i_uid, ip->i_inode.i_gid);

	rbm.rgd->rd_free_clone -= *nblocks;
	trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
			       dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
	*bn = block;
	return 0;

rgrp_error:
	gfs2_rgrp_error(rbm.rgd);
	return -EIO;
}
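
/*
 * Editor's note: an illustrative caller sketch, not part of the original
 * file.  A typical data allocation already holds the rgrp glock and an
 * open transaction; "ip" and "requested" are assumptions here.  Note that
 * @generation may be NULL when @dinode is false, since it is only written
 * for dinode allocations:
 *
 *	unsigned int n = requested;	(in: desired extent length)
 *	u64 block;
 *	int error;
 *
 *	error = gfs2_alloc_blocks(ip, &block, &n, false, NULL);
 *	if (error)
 *		return error;
 *	(out: "block" is the first block, "n" the extent length granted)
 */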
/**
 * __gfs2_free_blocks - free a contiguous run of block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 * @meta: 1 if the blocks represent metadata
 *
 */
void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;

	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
	if (!rgd)
		return;
	trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
	rgd->rd_free += blen;
	rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);

	/* Directories keep their data in the metadata address space */
	if (meta || ip->i_depth)
		gfs2_meta_wipe(ip, bstart, blen);
}
/**
 * gfs2_free_meta - free a contiguous run of metadata block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 *
 */
void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	__gfs2_free_blocks(ip, bstart, blen, 1);
	gfs2_statfs_change(sdp, 0, +blen, 0);
	gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
}
void gfs2_unlink_di(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_rgrpd *rgd;
	u64 blkno = ip->i_no_addr;

	rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
	if (!rgd)
		return;
	trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	be32_add_cpu(&rgd->rd_rgl->rl_unlinked, 1);
}
void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_rgrpd *tmp_rgd;

	tmp_rgd = rgblk_free(sdp, ip->i_no_addr, 1, GFS2_BLKST_FREE);
	if (!tmp_rgd)
		return;
	gfs2_assert_withdraw(sdp, rgd == tmp_rgd);

	if (!rgd->rd_dinodes)
		gfs2_consist_rgrpd(rgd);
	rgd->rd_dinodes--;
	rgd->rd_free++;

	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	be32_add_cpu(&rgd->rd_rgl->rl_unlinked, -1);

	gfs2_statfs_change(sdp, 0, +1, -1);
	trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
	gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
	gfs2_meta_wipe(ip, ip->i_no_addr, 1);
}
/**
 * gfs2_check_blk_type - Check the type of a block
 * @sdp: The superblock
 * @no_addr: The block number to check
 * @type: The block type we are looking for
 *
 * Returns: 0 if the block type matches the expected type
 *          -ESTALE if it doesn't match
 *          or -ve errno if something went wrong while checking
 */
int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder rgd_gh;
	struct gfs2_rbm rbm;
	int error = -EINVAL;

	rgd = gfs2_blk2rgrpd(sdp, no_addr, 1);
	if (!rgd)
		goto fail;

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh);
	if (error)
		goto fail;

	rbm.rgd = rgd;
	error = gfs2_rbm_from_block(&rbm, no_addr);
	WARN_ON_ONCE(error != 0);

	if (gfs2_testbit(&rbm, false) != type)
		error = -ESTALE;

	gfs2_glock_dq_uninit(&rgd_gh);
fail:
	return error;
}
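
/*
 * Editor's note: an illustrative caller sketch, not part of the original
 * file.  Inode lookup by number can use this check to reject stale inode
 * addresses before trying to read them in:
 *
 *	error = gfs2_check_blk_type(sdp, no_addr, GFS2_BLKST_DINODE);
 *	if (error)
 *		return ERR_PTR(error);	(-ESTALE: not, or no longer, a dinode)
 */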
/**
 * gfs2_rlist_add - add a RG to a list of RGs
 * @ip: the inode
 * @rlist: the list of resource groups
 * @block: the block
 *
 * Figure out what RG a block belongs to and add that RG to the list
 *
 * FIXME: Don't use NOFAIL
 *
 */
void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
		    u64 block)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrpd **tmp;
	unsigned int new_space;
	unsigned int x;

	if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
		return;

	/*
	 * The resource group last accessed is kept in the last position.
	 */

	if (rlist->rl_rgrps) {
		rgd = rlist->rl_rgd[rlist->rl_rgrps - 1];
		if (rgrp_contains_block(rgd, block))
			return;
		rgd = gfs2_blk2rgrpd(sdp, block, 1);
	} else {
		rgd = ip->i_res.rs_rbm.rgd;
		if (!rgd || !rgrp_contains_block(rgd, block))
			rgd = gfs2_blk2rgrpd(sdp, block, 1);
	}

	if (!rgd) {
		fs_err(sdp, "rlist_add: no rgrp for block %llu\n",
		       (unsigned long long)block);
		return;
	}

	for (x = 0; x < rlist->rl_rgrps; x++) {
		if (rlist->rl_rgd[x] == rgd) {
			swap(rlist->rl_rgd[x],
			     rlist->rl_rgd[rlist->rl_rgrps - 1]);
			return;
		}
	}

	if (rlist->rl_rgrps == rlist->rl_space) {
		new_space = rlist->rl_space + 10;

		tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
			      GFP_NOFS | __GFP_NOFAIL);

		if (rlist->rl_rgd) {
			memcpy(tmp, rlist->rl_rgd,
			       rlist->rl_space * sizeof(struct gfs2_rgrpd *));
			kfree(rlist->rl_rgd);
		}

		rlist->rl_space = new_space;
		rlist->rl_rgd = tmp;
	}

	rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
}
/**
 * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
 *                    and initialize an array of glock holders for them
 * @rlist: the list of resource groups
 * @state: the lock state to acquire the RG lock in
 *
 * FIXME: Don't use NOFAIL
 *
 */
void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
{
	unsigned int x;

	rlist->rl_ghs = kmalloc_array(rlist->rl_rgrps,
				      sizeof(struct gfs2_holder),
				      GFP_NOFS | __GFP_NOFAIL);
	for (x = 0; x < rlist->rl_rgrps; x++)
		gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
				 state, 0,
				 &rlist->rl_ghs[x]);
}
/**
 * gfs2_rlist_free - free a resource group list
 * @rlist: the list of resource groups
 *
 */
void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
{
	unsigned int x;

	kfree(rlist->rl_rgd);

	if (rlist->rl_ghs) {
		for (x = 0; x < rlist->rl_rgrps; x++)
			gfs2_holder_uninit(&rlist->rl_ghs[x]);
		kfree(rlist->rl_ghs);
		rlist->rl_ghs = NULL;
	}
}
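
/*
 * Editor's note: an illustrative lifecycle sketch, not part of the
 * original file.  The rlist helpers are used together when a deallocation
 * spans several resource groups, as in directory leaf freeing:
 *
 *	struct gfs2_rgrp_list rlist;
 *	int error;
 *
 *	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
 *	gfs2_rlist_add(ip, &rlist, block);	(once per block to free)
 *	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
 *	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
 *	if (!error) {
 *		(... free the blocks under the rgrp glocks ...)
 *		gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
 *	}
 *	gfs2_rlist_free(&rlist);
 */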