2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/slab.h>
13 #include <linux/spinlock.h>
14 #include <linux/completion.h>
15 #include <linux/buffer_head.h>
17 #include <linux/gfs2_ondisk.h>
18 #include <linux/prefetch.h>
19 #include <linux/blkdev.h>
20 #include <linux/rbtree.h>
21 #include <linux/random.h>
36 #include "trace_gfs2.h"
38 #define BFITNOENT ((u32)~0)
39 #define NO_BLOCK ((u64)~0)
41 #if BITS_PER_LONG == 32
42 #define LBITMASK (0x55555555UL)
43 #define LBITSKIP55 (0x55555555UL)
44 #define LBITSKIP00 (0x00000000UL)
46 #define LBITMASK (0x5555555555555555UL)
47 #define LBITSKIP55 (0x5555555555555555UL)
48 #define LBITSKIP00 (0x0000000000000000UL)
52 * These routines are used by the resource group routines (rgrp.c)
53 * to keep track of block allocation. Each block is represented by two
54 * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks.
57 * 1 = Used (not metadata)
58 * 2 = Unlinked (still in use) inode
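 *
 * For illustration: the bit-pair for block N of a byte occupies bits
 * (N % GFS2_NBBY) * 2 upwards, so the byte 0xE4 (bit-pairs 11,10,01,00 from
 * block 3 down to block 0) describes, reading from block 0 upwards, a free
 * block, a used data block, an unlinked inode and a used metadata block.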
67 static const char valid_change[16] = {
75 static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
76 const struct gfs2_inode *ip, bool nowrap);
80 * gfs2_setbit - Set a bit in the bitmaps
81 * @rbm: The position of the bit to set
82 * @do_clone: Also set the clone bitmap, if it exists
83 * @new_state: the new state of the block
87 static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
88 unsigned char new_state)
90 unsigned char *byte1, *byte2, *end, cur_state;
91 struct gfs2_bitmap *bi = rbm_bi(rbm);
92 unsigned int buflen = bi->bi_len;
93 const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
95 byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY);
96 end = bi->bi_bh->b_data + bi->bi_offset + buflen;
100 cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;
102 if (unlikely(!valid_change[new_state * 4 + cur_state])) {
103 pr_warn("buf_blk = 0x%x old_state=%d, new_state=%d\n",
104 rbm->offset, cur_state, new_state);
105 pr_warn("rgrp=0x%llx bi_start=0x%x\n",
106 (unsigned long long)rbm->rgd->rd_addr, bi->bi_start);
107 pr_warn("bi_offset=0x%x bi_len=0x%x\n",
108 bi->bi_offset, bi->bi_len);
110 gfs2_consist_rgrpd(rbm->rgd);
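	/* Note: XORing with (cur_state ^ new_state) at the pair's bit position
	 * flips exactly the bits that differ between the two states, leaving
	 * the other three bit-pairs in the byte untouched, e.g. going from
	 * GFS2_BLKST_FREE (00) to GFS2_BLKST_USED (01) flips only the low bit. */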
113 *byte1 ^= (cur_state ^ new_state) << bit;
115 if (do_clone && bi->bi_clone) {
116 byte2 = bi->bi_clone + bi->bi_offset + (rbm->offset / GFS2_NBBY);
117 cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
118 *byte2 ^= (cur_state ^ new_state) << bit;
123 * gfs2_testbit - test a bit in the bitmaps
124 * @rbm: The bit to test
126 * Returns: The two bit block state of the requested bit
129 static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm)
131 struct gfs2_bitmap *bi = rbm_bi(rbm);
132 const u8 *buffer = bi->bi_bh->b_data + bi->bi_offset;
136 byte = buffer + (rbm->offset / GFS2_NBBY);
137 bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
139 return (*byte >> bit) & GFS2_BIT_MASK;
144 * @ptr: Pointer to bitmap data
145 * @mask: Mask to use (normally 0x55555.... but adjusted for search start)
146 * @state: The state we are searching for
148 * We XOR the bitmap data with a pattern which is the bitwise opposite
149 * of what we are looking for; this gives rise to a pattern of ones
150 * wherever there is a match. Since we have two bits per entry, we
151 * take this pattern, shift it down by one place and then AND it with
152 * the original. All the even bit positions (0,2,4, etc) then represent
153 * successful matches, so we mask with 0x55555..... to remove the unwanted
156 * This allows searching of a whole u64 at once (32 blocks) with a
157 * single test (on 64 bit arches).
160 static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
163 static const u64 search[] = {
164 [0] = 0xffffffffffffffffULL,
165 [1] = 0xaaaaaaaaaaaaaaaaULL,
166 [2] = 0x5555555555555555ULL,
167 [3] = 0x0000000000000000ULL,
169 tmp = le64_to_cpu(*ptr) ^ search[state];
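/*
 * Worked example (illustrative, shown on one byte rather than a whole u64):
 * with bitmap data 0xE4 (blocks 3..0 = 11,10,01,00) and a search for
 * GFS2_BLKST_FREE, search[0] is 0xff per byte, so tmp = 0xE4 ^ 0xff = 0x1b;
 * tmp &= (tmp >> 1) leaves 0x09, and masking with 0x55 leaves 0x01, i.e.
 * only bit 0 is set, showing that only block 0 of this byte is free.
 */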
176 * rs_cmp - multi-block reservation range compare
177 * @blk: absolute file system block number of the new reservation
178 * @len: number of blocks in the new reservation
179 * @rs: existing reservation to compare against
181 * returns: 1 if the block range is beyond the reach of the reservation
182 * -1 if the block range is before the start of the reservation
183 * 0 if the block range overlaps with the reservation
185 static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
187 u64 startblk = gfs2_rbm_to_block(&rs->rs_rbm);
189 if (blk >= startblk + rs->rs_free)
191 if (blk + len - 1 < startblk)
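/*
 * For example: a reservation starting at block 1000 with rs_free = 64 covers
 * blocks 1000..1063, so rs_cmp(1100, 10, rs) returns 1 (range is entirely
 * after it), rs_cmp(900, 50, rs) returns -1 (entirely before it) and
 * rs_cmp(1050, 20, rs) returns 0 (the ranges overlap).
 */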
197 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
198 * a block in a given allocation state.
199 * @buf: the buffer that holds the bitmaps
200 * @len: the length (in bytes) of the buffer
201 * @goal: start search at this block's bit-pair (within @buf)
202 * @state: GFS2_BLKST_XXX the state of the block we're looking for.
204 * Scope of @goal and returned block number is only within this bitmap buffer,
205 * not entire rgrp or filesystem. @buf will be offset from the actual
206 * beginning of a bitmap block buffer, skipping any header structures, but
207 * headers are always a multiple of 64 bits long so that the buffer is
208 * always aligned to a 64 bit boundary.
210 * The size of the buffer is in bytes, but it is assumed that it is
211 * always ok to read a complete multiple of 64 bits at the end
212 * of the block in case the end is not aligned to a natural boundary.
214 * Return: the block number (bitmap buffer scope) that was found
217 static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
220 u32 spoint = (goal << 1) & ((8*sizeof(u64)) - 1);
221 const __le64 *ptr = ((__le64 *)buf) + (goal >> 5);
222 const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64)));
224 u64 mask = 0x5555555555555555ULL;
227 /* Mask off bits we don't care about at the start of the search */
229 tmp = gfs2_bit_search(ptr, mask, state);
231 while(tmp == 0 && ptr < end) {
232 tmp = gfs2_bit_search(ptr, 0x5555555555555555ULL, state);
235 /* Mask off any bits which are more than len bytes from the start */
236 if (ptr == end && (len & (sizeof(u64) - 1)))
237 tmp &= (((u64)~0) >> (64 - 8*(len & (sizeof(u64) - 1))));
238 /* Didn't find anything, so return */
243 bit /= 2; /* two bits per entry in the bitmap */
244 return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
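/*
 * Illustrative example of the return value: if the first matching bit-pair
 * is found in the third u64 of the buffer (ptr - buf == 16 bytes) at
 * bit-pair index 5 within that word, the result is 16 * GFS2_NBBY + 5 = 69,
 * i.e. block 69 of this bitmap buffer, counting from zero.
 */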
248 * gfs2_rbm_from_block - Set the rbm based upon rgd and block number
249 * @rbm: The rbm with rgd already set correctly
250 * @block: The block number (filesystem relative)
252 * This sets the bi and offset members of an rbm based on a
253 * resource group and a filesystem relative block number. The
254 * resource group must be set in the rbm on entry, the bi and
255 * offset members will be set by this function.
257 * Returns: 0 on success, or an error code
260 static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
262 u64 rblock = block - rbm->rgd->rd_data0;
264 if (WARN_ON_ONCE(rblock > UINT_MAX))
266 if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
270 rbm->offset = (u32)(rblock);
271 /* Check if the block is within the first bitmap */
272 if (rbm->offset < rbm_bi(rbm)->bi_blocks)
275 /* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
276 rbm->offset += (sizeof(struct gfs2_rgrp) -
277 sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
278 rbm->bii = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
279 rbm->offset -= rbm->bii * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
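	/* Sketch of the mapping: once the offset is padded by the difference
	 * between the rgrp header and a plain metadata header, every bitmap
	 * (including the first) can be treated as holding exactly
	 * sd_blocks_per_bitmap entries, so a simple divide and modulo yield
	 * the bitmap index (bii) and the offset within that bitmap. */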
284 * gfs2_rbm_incr - increment an rbm structure
285 * @rbm: The rbm with rgd already set correctly
287 * This function takes an existing rbm structure and increments it to the next
288 * viable block offset.
290 * Returns: If incrementing the offset would cause the rbm to go past the
291 * end of the rgrp, true is returned, otherwise false.
295 static bool gfs2_rbm_incr(struct gfs2_rbm *rbm)
297 if (rbm->offset + 1 < rbm_bi(rbm)->bi_blocks) { /* in the same bitmap */
301 if (rbm->bii == rbm->rgd->rd_length - 1) /* at the last bitmap */
310 * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
311 * @rbm: Position to search (value/result)
312 * @n_unaligned: Number of unaligned blocks to check
313 * @len: Decremented for each block found (terminate on zero)
315 * Returns: true if a non-free block is encountered
318 static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
323 for (n = 0; n < n_unaligned; n++) {
324 res = gfs2_testbit(rbm);
325 if (res != GFS2_BLKST_FREE)
330 if (gfs2_rbm_incr(rbm))
338 * gfs2_free_extlen - Return extent length of free blocks
339 * @rrbm: Starting position
340 * @len: Max length to check
342 * Starting at the block specified by the rbm, see how many free blocks
343 * there are, not reading more than len blocks ahead. This can be done
344 * using memchr_inv when the blocks are byte aligned, but has to be done
345 * on a block by block basis in case of unaligned blocks. Also this
346 * function can cope with bitmap boundaries (although it must stop on
347 * a resource group boundary)
349 * Returns: Number of free blocks in the extent
352 static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
354 struct gfs2_rbm rbm = *rrbm;
355 u32 n_unaligned = rbm.offset & 3;
359 u8 *ptr, *start, *end;
361 struct gfs2_bitmap *bi;
364 gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
367 n_unaligned = len & 3;
368 /* Start is now byte aligned */
371 start = bi->bi_bh->b_data;
373 start = bi->bi_clone;
374 end = start + bi->bi_bh->b_size;
375 start += bi->bi_offset;
376 BUG_ON(rbm.offset & 3);
377 start += (rbm.offset / GFS2_NBBY);
378 bytes = min_t(u32, len / GFS2_NBBY, (end - start));
379 ptr = memchr_inv(start, 0, bytes);
380 chunk_size = ((ptr == NULL) ? bytes : (ptr - start));
381 chunk_size *= GFS2_NBBY;
382 BUG_ON(len < chunk_size);
384 block = gfs2_rbm_to_block(&rbm);
385 if (gfs2_rbm_from_block(&rbm, block + chunk_size)) {
393 n_unaligned = len & 3;
396 /* Deal with any bits left over at the end */
398 gfs2_unaligned_extlen(&rbm, n_unaligned, &len);
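	/* Illustrative walk-through: starting at an offset with the low two
	 * bits equal to 2, the first two blocks are tested one at a time up to
	 * the byte boundary, memchr_inv() then skips whole 0x00 bytes (four
	 * free blocks per byte), possibly across several bitmap blocks, and
	 * any remaining one to three blocks are tested individually. */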
404 * gfs2_bitcount - count the number of bits in a certain state
405 * @rgd: the resource group descriptor
406 * @buffer: the buffer that holds the bitmaps
407 * @buflen: the length (in bytes) of the buffer
408 * @state: the state of the block we're looking for
410 * Returns: The number of bits
413 static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
414 unsigned int buflen, u8 state)
416 const u8 *byte = buffer;
417 const u8 *end = buffer + buflen;
418 const u8 state1 = state << 2;
419 const u8 state2 = state << 4;
420 const u8 state3 = state << 6;
423 for (; byte < end; byte++) {
424 if (((*byte) & 0x03) == state)
426 if (((*byte) & 0x0C) == state1)
428 if (((*byte) & 0x30) == state2)
430 if (((*byte) & 0xC0) == state3)
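		/* For example: counting GFS2_BLKST_USED (01) in the byte 0x45,
		 * whose bit-pairs from block 0 upwards are 01, 01, 00, 01,
		 * matches at the 0x03, 0x0C and 0xC0 positions and adds three
		 * to the running count for that byte. */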
438 * gfs2_rgrp_verify - Verify that a resource group is consistent
443 void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
445 struct gfs2_sbd *sdp = rgd->rd_sbd;
446 struct gfs2_bitmap *bi = NULL;
447 u32 length = rgd->rd_length;
451 memset(count, 0, 4 * sizeof(u32));
453 /* Count # blocks in each of 4 possible allocation states */
454 for (buf = 0; buf < length; buf++) {
455 bi = rgd->rd_bits + buf;
456 for (x = 0; x < 4; x++)
457 count[x] += gfs2_bitcount(rgd,
463 if (count[0] != rgd->rd_free) {
464 if (gfs2_consist_rgrpd(rgd))
465 fs_err(sdp, "free data mismatch: %u != %u\n",
466 count[0], rgd->rd_free);
470 tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
471 if (count[1] != tmp) {
472 if (gfs2_consist_rgrpd(rgd))
473 fs_err(sdp, "used data mismatch: %u != %u\n",
478 if (count[2] + count[3] != rgd->rd_dinodes) {
479 if (gfs2_consist_rgrpd(rgd))
480 fs_err(sdp, "used metadata mismatch: %u != %u\n",
481 count[2] + count[3], rgd->rd_dinodes);
487 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
488 * @sdp: The GFS2 superblock
489 * @blk: The data block number
490 * @exact: True if this needs to be an exact match
492 * Returns: The resource group, or NULL if not found
495 struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact)
497 struct rb_node *n, *next;
498 struct gfs2_rgrpd *cur;
500 spin_lock(&sdp->sd_rindex_spin);
501 n = sdp->sd_rindex_tree.rb_node;
503 cur = rb_entry(n, struct gfs2_rgrpd, rd_node);
505 if (blk < cur->rd_addr)
507 else if (blk >= cur->rd_data0 + cur->rd_data)
510 spin_unlock(&sdp->sd_rindex_spin);
512 if (blk < cur->rd_addr)
514 if (blk >= cur->rd_data0 + cur->rd_data)
521 spin_unlock(&sdp->sd_rindex_spin);
527 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
528 * @sdp: The GFS2 superblock
530 * Returns: The first rgrp in the filesystem
533 struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
535 const struct rb_node *n;
536 struct gfs2_rgrpd *rgd;
538 spin_lock(&sdp->sd_rindex_spin);
539 n = rb_first(&sdp->sd_rindex_tree);
540 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
541 spin_unlock(&sdp->sd_rindex_spin);
547 * gfs2_rgrpd_get_next - get the next RG
548 * @rgd: the resource group descriptor
550 * Returns: The next rgrp
553 struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
555 struct gfs2_sbd *sdp = rgd->rd_sbd;
556 const struct rb_node *n;
558 spin_lock(&sdp->sd_rindex_spin);
559 n = rb_next(&rgd->rd_node);
561 n = rb_first(&sdp->sd_rindex_tree);
563 if (unlikely(&rgd->rd_node == n)) {
564 spin_unlock(&sdp->sd_rindex_spin);
567 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
568 spin_unlock(&sdp->sd_rindex_spin);
572 void check_and_update_goal(struct gfs2_inode *ip)
574 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
575 if (!ip->i_goal || gfs2_blk2rgrpd(sdp, ip->i_goal, 1) == NULL)
576 ip->i_goal = ip->i_no_addr;
579 void gfs2_free_clones(struct gfs2_rgrpd *rgd)
583 for (x = 0; x < rgd->rd_length; x++) {
584 struct gfs2_bitmap *bi = rgd->rd_bits + x;
591 * gfs2_rsqa_alloc - make sure we have a reservation assigned to the inode
592 * plus a quota allocation data structure, if necessary
593 * @ip: the inode for this reservation
595 int gfs2_rsqa_alloc(struct gfs2_inode *ip)
597 return gfs2_qa_alloc(ip);
600 static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs)
602 gfs2_print_dbg(seq, " B: n:%llu s:%llu b:%u f:%u\n",
603 (unsigned long long)rs->rs_inum,
604 (unsigned long long)gfs2_rbm_to_block(&rs->rs_rbm),
605 rs->rs_rbm.offset, rs->rs_free);
609 * __rs_deltree - remove a multi-block reservation from the rgd tree
610 * @rs: The reservation to remove
613 static void __rs_deltree(struct gfs2_blkreserv *rs)
615 struct gfs2_rgrpd *rgd;
617 if (!gfs2_rs_active(rs))
620 rgd = rs->rs_rbm.rgd;
621 trace_gfs2_rs(rs, TRACE_RS_TREEDEL);
622 rb_erase(&rs->rs_node, &rgd->rd_rstree);
623 RB_CLEAR_NODE(&rs->rs_node);
626 u64 last_block = gfs2_rbm_to_block(&rs->rs_rbm) +
628 struct gfs2_rbm last_rbm = { .rgd = rs->rs_rbm.rgd, };
629 struct gfs2_bitmap *start, *last;
631 /* return reserved blocks to the rgrp */
632 BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
633 rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
634 /* The rgrp extent failure point is likely not to increase;
635 it will only do so if the freed blocks are somehow
636 contiguous with a span of free blocks that follows. Still,
637 it will force the number to be recalculated later. */
638 rgd->rd_extfail_pt += rs->rs_free;
640 if (gfs2_rbm_from_block(&last_rbm, last_block))
642 start = rbm_bi(&rs->rs_rbm);
643 last = rbm_bi(&last_rbm);
645 clear_bit(GBF_FULL, &start->bi_flags);
646 while (start++ != last);
651 * gfs2_rs_deltree - remove a multi-block reservation from the rgd tree
652 * @rs: The reservation to remove
655 void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
657 struct gfs2_rgrpd *rgd;
659 rgd = rs->rs_rbm.rgd;
661 spin_lock(&rgd->rd_rsspin);
664 spin_unlock(&rgd->rd_rsspin);
669 * gfs2_rsqa_delete - delete a multi-block reservation and quota allocation
670 * @ip: The inode for this reservation
671 * @wcount: The inode's write count, or NULL
674 void gfs2_rsqa_delete(struct gfs2_inode *ip, atomic_t *wcount)
676 down_write(&ip->i_rw_mutex);
677 if ((wcount == NULL) || (atomic_read(wcount) <= 1))
678 gfs2_rs_deltree(&ip->i_res);
679 up_write(&ip->i_rw_mutex);
680 gfs2_qa_delete(ip, wcount);
684 * return_all_reservations - return all reserved blocks back to the rgrp.
685 * @rgd: the rgrp that needs its space back
687 * We previously reserved a bunch of blocks for allocation. Now we need to
688 * give them back. This leaves the reservation structures intact, but removes
689 * all of their corresponding "no-fly zones".
691 static void return_all_reservations(struct gfs2_rgrpd *rgd)
694 struct gfs2_blkreserv *rs;
696 spin_lock(&rgd->rd_rsspin);
697 while ((n = rb_first(&rgd->rd_rstree))) {
698 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
701 spin_unlock(&rgd->rd_rsspin);
704 void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
707 struct gfs2_rgrpd *rgd;
708 struct gfs2_glock *gl;
710 while ((n = rb_first(&sdp->sd_rindex_tree))) {
711 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
714 rb_erase(n, &sdp->sd_rindex_tree);
717 glock_clear_object(gl, rgd);
718 gfs2_rgrp_brelse(rgd);
722 gfs2_free_clones(rgd);
723 return_all_reservations(rgd);
726 kmem_cache_free(gfs2_rgrpd_cachep, rgd);
730 static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
732 pr_info("ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
733 pr_info("ri_length = %u\n", rgd->rd_length);
734 pr_info("ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
735 pr_info("ri_data = %u\n", rgd->rd_data);
736 pr_info("ri_bitbytes = %u\n", rgd->rd_bitbytes);
740 * compute_bitstructs - Compute the bitmap sizes
741 * @rgd: The resource group descriptor
743 * Calculates bitmap descriptors, one for each block that contains bitmap data
748 static int compute_bitstructs(struct gfs2_rgrpd *rgd)
750 struct gfs2_sbd *sdp = rgd->rd_sbd;
751 struct gfs2_bitmap *bi;
752 u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
753 u32 bytes_left, bytes;
759 rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
763 bytes_left = rgd->rd_bitbytes;
765 for (x = 0; x < length; x++) {
766 bi = rgd->rd_bits + x;
769 /* small rgrp; bitmap stored completely in header block */
772 bi->bi_offset = sizeof(struct gfs2_rgrp);
775 bi->bi_blocks = bytes * GFS2_NBBY;
778 bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
779 bi->bi_offset = sizeof(struct gfs2_rgrp);
782 bi->bi_blocks = bytes * GFS2_NBBY;
784 } else if (x + 1 == length) {
786 bi->bi_offset = sizeof(struct gfs2_meta_header);
787 bi->bi_start = rgd->rd_bitbytes - bytes_left;
789 bi->bi_blocks = bytes * GFS2_NBBY;
792 bytes = sdp->sd_sb.sb_bsize -
793 sizeof(struct gfs2_meta_header);
794 bi->bi_offset = sizeof(struct gfs2_meta_header);
795 bi->bi_start = rgd->rd_bitbytes - bytes_left;
797 bi->bi_blocks = bytes * GFS2_NBBY;
804 gfs2_consist_rgrpd(rgd);
807 bi = rgd->rd_bits + (length - 1);
808 if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) {
809 if (gfs2_consist_rgrpd(rgd)) {
810 gfs2_rindex_print(rgd);
811 fs_err(sdp, "start=%u len=%u offset=%u\n",
812 bi->bi_start, bi->bi_len, bi->bi_offset);
821 * gfs2_ri_total - Total up the file system space, according to the rindex.
822 * @sdp: the filesystem
825 u64 gfs2_ri_total(struct gfs2_sbd *sdp)
828 struct inode *inode = sdp->sd_rindex;
829 struct gfs2_inode *ip = GFS2_I(inode);
830 char buf[sizeof(struct gfs2_rindex)];
833 for (rgrps = 0;; rgrps++) {
834 loff_t pos = rgrps * sizeof(struct gfs2_rindex);
836 if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode))
838 error = gfs2_internal_read(ip, buf, &pos,
839 sizeof(struct gfs2_rindex));
840 if (error != sizeof(struct gfs2_rindex))
842 total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
847 static int rgd_insert(struct gfs2_rgrpd *rgd)
849 struct gfs2_sbd *sdp = rgd->rd_sbd;
850 struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;
852 /* Figure out where to put new node */
854 struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
858 if (rgd->rd_addr < cur->rd_addr)
859 newn = &((*newn)->rb_left);
860 else if (rgd->rd_addr > cur->rd_addr)
861 newn = &((*newn)->rb_right);
866 rb_link_node(&rgd->rd_node, parent, newn);
867 rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
873 * read_rindex_entry - Pull in a new resource index entry from the disk
874 * @ip: Pointer to the rindex inode
876 * Returns: 0 on success, > 0 on EOF, error code otherwise
879 static int read_rindex_entry(struct gfs2_inode *ip)
881 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
882 const unsigned bsize = sdp->sd_sb.sb_bsize;
883 loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
884 struct gfs2_rindex buf;
886 struct gfs2_rgrpd *rgd;
888 if (pos >= i_size_read(&ip->i_inode))
891 error = gfs2_internal_read(ip, (char *)&buf, &pos,
892 sizeof(struct gfs2_rindex));
894 if (error != sizeof(struct gfs2_rindex))
895 return (error == 0) ? 1 : error;
897 rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
903 rgd->rd_addr = be64_to_cpu(buf.ri_addr);
904 rgd->rd_length = be32_to_cpu(buf.ri_length);
905 rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
906 rgd->rd_data = be32_to_cpu(buf.ri_data);
907 rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
908 spin_lock_init(&rgd->rd_rsspin);
910 error = gfs2_glock_get(sdp, rgd->rd_addr,
911 &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
915 error = compute_bitstructs(rgd);
919 rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
920 rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
921 if (rgd->rd_data > sdp->sd_max_rg_data)
922 sdp->sd_max_rg_data = rgd->rd_data;
923 spin_lock(&sdp->sd_rindex_spin);
924 error = rgd_insert(rgd);
925 spin_unlock(&sdp->sd_rindex_spin);
927 glock_set_object(rgd->rd_gl, rgd);
928 rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
929 rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr +
930 rgd->rd_length) * bsize) - 1;
934 error = 0; /* someone else read in the rgrp; free it and ignore it */
936 gfs2_glock_put(rgd->rd_gl);
941 kmem_cache_free(gfs2_rgrpd_cachep, rgd);
946 * set_rgrp_preferences - Run through all the rgrps, selecting some we prefer to use
947 * @sdp: the GFS2 superblock
949 * The purpose of this function is to select a subset of the resource groups
950 * and mark them as PREFERRED. We do it in such a way that each node prefers
951 * to use a unique set of rgrps to minimize glock contention.
953 static void set_rgrp_preferences(struct gfs2_sbd *sdp)
955 struct gfs2_rgrpd *rgd, *first;
958 /* Skip an initial number of rgrps, based on this node's journal ID.
959 That should start each node out on its own set. */
960 rgd = gfs2_rgrpd_get_first(sdp);
961 for (i = 0; i < sdp->sd_lockstruct.ls_jid; i++)
962 rgd = gfs2_rgrpd_get_next(rgd);
966 rgd->rd_flags |= GFS2_RDF_PREFERRED;
967 for (i = 0; i < sdp->sd_journals; i++) {
968 rgd = gfs2_rgrpd_get_next(rgd);
969 if (!rgd || rgd == first)
972 } while (rgd && rgd != first);
976 * gfs2_ri_update - Pull in a new resource index from the disk
977 * @ip: pointer to the rindex inode
979 * Returns: 0 on successful update, error code otherwise
982 static int gfs2_ri_update(struct gfs2_inode *ip)
984 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
988 error = read_rindex_entry(ip);
989 } while (error == 0);
994 if (RB_EMPTY_ROOT(&sdp->sd_rindex_tree)) {
995 fs_err(sdp, "no resource groups found in the file system.\n");
998 set_rgrp_preferences(sdp);
1000 sdp->sd_rindex_uptodate = 1;
1005 * gfs2_rindex_update - Update the rindex if required
1006 * @sdp: The GFS2 superblock
1008 * We grab a lock on the rindex inode to make sure that it doesn't
1009 * change whilst we are performing an operation. We keep this lock
1010 * for quite long periods of time compared to other locks. This
1011 * doesn't matter, since it is shared and it is very, very rarely
1012 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
1014 * This makes sure that we're using the latest copy of the resource index
1015 * special file, which might have been updated if someone expanded the
1016 * filesystem (via gfs2_grow utility), which adds new resource groups.
1018 * Returns: 0 on success, error code otherwise
1021 int gfs2_rindex_update(struct gfs2_sbd *sdp)
1023 struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
1024 struct gfs2_glock *gl = ip->i_gl;
1025 struct gfs2_holder ri_gh;
1027 int unlock_required = 0;
1029 /* Read new copy from disk if we don't have the latest */
1030 if (!sdp->sd_rindex_uptodate) {
1031 if (!gfs2_glock_is_locked_by_me(gl)) {
1032 error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
1035 unlock_required = 1;
1037 if (!sdp->sd_rindex_uptodate)
1038 error = gfs2_ri_update(ip);
1039 if (unlock_required)
1040 gfs2_glock_dq_uninit(&ri_gh);
1046 static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
1048 const struct gfs2_rgrp *str = buf;
1051 rg_flags = be32_to_cpu(str->rg_flags);
1052 rg_flags &= ~GFS2_RDF_MASK;
1053 rgd->rd_flags &= GFS2_RDF_MASK;
1054 rgd->rd_flags |= rg_flags;
1055 rgd->rd_free = be32_to_cpu(str->rg_free);
1056 rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
1057 rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
1060 static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
1062 struct gfs2_rgrp *str = buf;
1064 str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
1065 str->rg_free = cpu_to_be32(rgd->rd_free);
1066 str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
1067 str->__pad = cpu_to_be32(0);
1068 str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
1069 memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
1072 static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
1074 struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
1075 struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;
1077 if (rgl->rl_flags != str->rg_flags || rgl->rl_free != str->rg_free ||
1078 rgl->rl_dinodes != str->rg_dinodes ||
1079 rgl->rl_igeneration != str->rg_igeneration)
1084 static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
1086 const struct gfs2_rgrp *str = buf;
1088 rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
1089 rgl->rl_flags = str->rg_flags;
1090 rgl->rl_free = str->rg_free;
1091 rgl->rl_dinodes = str->rg_dinodes;
1092 rgl->rl_igeneration = str->rg_igeneration;
1096 static void update_rgrp_lvb_unlinked(struct gfs2_rgrpd *rgd, u32 change)
1098 struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
1099 u32 unlinked = be32_to_cpu(rgl->rl_unlinked) + change;
1100 rgl->rl_unlinked = cpu_to_be32(unlinked);
1103 static u32 count_unlinked(struct gfs2_rgrpd *rgd)
1105 struct gfs2_bitmap *bi;
1106 const u32 length = rgd->rd_length;
1107 const u8 *buffer = NULL;
1108 u32 i, goal, count = 0;
1110 for (i = 0, bi = rgd->rd_bits; i < length; i++, bi++) {
1112 buffer = bi->bi_bh->b_data + bi->bi_offset;
1113 WARN_ON(!buffer_uptodate(bi->bi_bh));
1114 while (goal < bi->bi_len * GFS2_NBBY) {
1115 goal = gfs2_bitfit(buffer, bi->bi_len, goal,
1116 GFS2_BLKST_UNLINKED);
1117 if (goal == BFITNOENT)
1129 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
1130 * @rgd: the struct gfs2_rgrpd describing the RG to read in
1132 * Read in all of a Resource Group's header and bitmap blocks.
1133 * Caller must eventually call gfs2_rgrp_brelse() to free the bitmaps.
1138 static int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
1140 struct gfs2_sbd *sdp = rgd->rd_sbd;
1141 struct gfs2_glock *gl = rgd->rd_gl;
1142 unsigned int length = rgd->rd_length;
1143 struct gfs2_bitmap *bi;
1147 if (rgd->rd_bits[0].bi_bh != NULL)
1150 for (x = 0; x < length; x++) {
1151 bi = rgd->rd_bits + x;
1152 error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, 0, &bi->bi_bh);
1157 for (y = length; y--;) {
1158 bi = rgd->rd_bits + y;
1159 error = gfs2_meta_wait(sdp, bi->bi_bh);
1162 if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
1163 GFS2_METATYPE_RG)) {
1169 if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
1170 for (x = 0; x < length; x++)
1171 clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
1172 gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
1173 rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
1174 rgd->rd_free_clone = rgd->rd_free;
1175 /* max out the rgrp allocation failure point */
1176 rgd->rd_extfail_pt = rgd->rd_free;
1178 if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
1179 rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
1180 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
1181 rgd->rd_bits[0].bi_bh->b_data);
1183 else if (sdp->sd_args.ar_rgrplvb) {
1184 if (!gfs2_rgrp_lvb_valid(rgd)){
1185 gfs2_consist_rgrpd(rgd);
1189 if (rgd->rd_rgl->rl_unlinked == 0)
1190 rgd->rd_flags &= ~GFS2_RDF_CHECK;
1196 bi = rgd->rd_bits + x;
1199 gfs2_assert_warn(sdp, !bi->bi_clone);
1205 static int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
1209 if (rgd->rd_flags & GFS2_RDF_UPTODATE)
1212 if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
1213 return gfs2_rgrp_bh_get(rgd);
1215 rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
1216 rl_flags &= ~GFS2_RDF_MASK;
1217 rgd->rd_flags &= GFS2_RDF_MASK;
1218 rgd->rd_flags |= (rl_flags | GFS2_RDF_CHECK);
1219 if (rgd->rd_rgl->rl_unlinked == 0)
1220 rgd->rd_flags &= ~GFS2_RDF_CHECK;
1221 rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
1222 rgd->rd_free_clone = rgd->rd_free;
1223 rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
1224 rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
1228 int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
1230 struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
1231 struct gfs2_sbd *sdp = rgd->rd_sbd;
1233 if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb)
1235 return gfs2_rgrp_bh_get(rgd);
1239 * gfs2_rgrp_brelse - Release RG bitmaps read in with gfs2_rgrp_bh_get()
1240 * @rgd: The resource group
1244 void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd)
1246 int x, length = rgd->rd_length;
1248 for (x = 0; x < length; x++) {
1249 struct gfs2_bitmap *bi = rgd->rd_bits + x;
1259 * gfs2_rgrp_go_unlock - Unlock a rgrp glock
1260 * @gh: The glock holder for the resource group
1264 void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
1266 struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
1267 int demote_requested = test_bit(GLF_DEMOTE, &gh->gh_gl->gl_flags) |
1268 test_bit(GLF_PENDING_DEMOTE, &gh->gh_gl->gl_flags);
1270 if (rgd && demote_requested)
1271 gfs2_rgrp_brelse(rgd);
1274 int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
1275 struct buffer_head *bh,
1276 const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)
1278 struct super_block *sb = sdp->sd_vfs;
1281 sector_t nr_blks = 0;
1287 for (x = 0; x < bi->bi_len; x++) {
1288 const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
1289 clone += bi->bi_offset;
1292 const u8 *orig = bh->b_data + bi->bi_offset + x;
1293 diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1));
1295 diff = ~(*clone | (*clone >> 1));
1300 blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
1304 goto start_new_extent;
1305 if ((start + nr_blks) != blk) {
1306 if (nr_blks >= minlen) {
1307 rv = sb_issue_discard(sb,
1324 if (nr_blks >= minlen) {
1325 rv = sb_issue_discard(sb, start, nr_blks, GFP_NOFS, 0);
1331 *ptrimmed = trimmed;
1335 if (sdp->sd_args.ar_discard)
1336 fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem", rv);
1337 sdp->sd_args.ar_discard = 0;
1342 * gfs2_fitrim - Generate discard requests for unused bits of the filesystem
1343 * @filp: Any file on the filesystem
1344 * @argp: Pointer to the arguments (also used to pass result)
1346 * Returns: 0 on success, otherwise error code
1349 int gfs2_fitrim(struct file *filp, void __user *argp)
1351 struct inode *inode = file_inode(filp);
1352 struct gfs2_sbd *sdp = GFS2_SB(inode);
1353 struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
1354 struct buffer_head *bh;
1355 struct gfs2_rgrpd *rgd;
1356 struct gfs2_rgrpd *rgd_end;
1357 struct gfs2_holder gh;
1358 struct fstrim_range r;
1362 u64 start, end, minlen;
1364 unsigned bs_shift = sdp->sd_sb.sb_bsize_shift;
1366 if (!capable(CAP_SYS_ADMIN))
1369 if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
1372 if (!blk_queue_discard(q))
1375 if (copy_from_user(&r, argp, sizeof(r)))
1378 ret = gfs2_rindex_update(sdp);
1382 start = r.start >> bs_shift;
1383 end = start + (r.len >> bs_shift);
1384 minlen = max_t(u64, r.minlen, sdp->sd_sb.sb_bsize);
1385 minlen = max_t(u64, minlen,
1386 q->limits.discard_granularity) >> bs_shift;
1388 if (end <= start || minlen > sdp->sd_max_rg_data)
1391 rgd = gfs2_blk2rgrpd(sdp, start, 0);
1392 rgd_end = gfs2_blk2rgrpd(sdp, end, 0);
1394 if ((gfs2_rgrpd_get_first(sdp) == gfs2_rgrpd_get_next(rgd_end))
1395 && (start > rgd_end->rd_data0 + rgd_end->rd_data))
1396 return -EINVAL; /* start is beyond the end of the fs */
1400 ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
1404 if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) {
1405 /* Trim each bitmap in the rgrp */
1406 for (x = 0; x < rgd->rd_length; x++) {
1407 struct gfs2_bitmap *bi = rgd->rd_bits + x;
1408 ret = gfs2_rgrp_send_discards(sdp,
1409 rgd->rd_data0, NULL, bi, minlen,
1412 gfs2_glock_dq_uninit(&gh);
1418 /* Mark rgrp as having been trimmed */
1419 ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
1421 bh = rgd->rd_bits[0].bi_bh;
1422 rgd->rd_flags |= GFS2_RGF_TRIMMED;
1423 gfs2_trans_add_meta(rgd->rd_gl, bh);
1424 gfs2_rgrp_out(rgd, bh->b_data);
1425 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, bh->b_data);
1426 gfs2_trans_end(sdp);
1429 gfs2_glock_dq_uninit(&gh);
1434 rgd = gfs2_rgrpd_get_next(rgd);
1438 r.len = trimmed << bs_shift;
1439 if (copy_to_user(argp, &r, sizeof(r)))
1446 * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree
1447 * @ip: the inode structure
1450 static void rs_insert(struct gfs2_inode *ip)
1452 struct rb_node **newn, *parent = NULL;
1454 struct gfs2_blkreserv *rs = &ip->i_res;
1455 struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd;
1456 u64 fsblock = gfs2_rbm_to_block(&rs->rs_rbm);
1458 BUG_ON(gfs2_rs_active(rs));
1460 spin_lock(&rgd->rd_rsspin);
1461 newn = &rgd->rd_rstree.rb_node;
1463 struct gfs2_blkreserv *cur =
1464 rb_entry(*newn, struct gfs2_blkreserv, rs_node);
1467 rc = rs_cmp(fsblock, rs->rs_free, cur);
1469 newn = &((*newn)->rb_right);
1471 newn = &((*newn)->rb_left);
1473 spin_unlock(&rgd->rd_rsspin);
1479 rb_link_node(&rs->rs_node, parent, newn);
1480 rb_insert_color(&rs->rs_node, &rgd->rd_rstree);
1482 /* Do our rgrp accounting for the reservation */
1483 rgd->rd_reserved += rs->rs_free; /* blocks reserved */
1484 spin_unlock(&rgd->rd_rsspin);
1485 trace_gfs2_rs(rs, TRACE_RS_INSERT);
1489 * rg_mblk_search - find a group of multiple free blocks to form a reservation
1490 * @rgd: the resource group descriptor
1491 * @ip: pointer to the inode for which we're reserving blocks
1492 * @ap: the allocation parameters
1496 static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
1497 const struct gfs2_alloc_parms *ap)
1499 struct gfs2_rbm rbm = { .rgd = rgd, };
1501 struct gfs2_blkreserv *rs = &ip->i_res;
1503 u32 free_blocks = rgd->rd_free_clone - rgd->rd_reserved;
1505 struct inode *inode = &ip->i_inode;
1507 if (S_ISDIR(inode->i_mode))
1510 extlen = max_t(u32, atomic_read(&rs->rs_sizehint), ap->target);
1511 extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
1513 if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
1516 /* Find bitmap block that contains bits for goal block */
1517 if (rgrp_contains_block(rgd, ip->i_goal))
1520 goal = rgd->rd_last_alloc + rgd->rd_data0;
1522 if (WARN_ON(gfs2_rbm_from_block(&rbm, goal)))
1525 ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &extlen, ip, true);
1528 rs->rs_free = extlen;
1529 rs->rs_inum = ip->i_no_addr;
1532 if (goal == rgd->rd_last_alloc + rgd->rd_data0)
1533 rgd->rd_last_alloc = 0;
1538 * gfs2_next_unreserved_block - Return next block that is not reserved
1539 * @rgd: The resource group
1540 * @block: The starting block
1541 * @length: The required length
1542 * @ip: Ignore any reservations for this inode
1544 * If the block does not appear in any reservation, then return the
1545 * block number unchanged. If it does appear in the reservation, then
1546 * keep looking through the tree of reservations in order to find the
1547 * first block number which is not reserved.
1550 static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
1552 const struct gfs2_inode *ip)
1554 struct gfs2_blkreserv *rs;
1558 spin_lock(&rgd->rd_rsspin);
1559 n = rgd->rd_rstree.rb_node;
1561 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
1562 rc = rs_cmp(block, length, rs);
1572 while ((rs_cmp(block, length, rs) == 0) && (&ip->i_res != rs)) {
1573 block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free;
1577 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
1581 spin_unlock(&rgd->rd_rsspin);
1586 * gfs2_reservation_check_and_update - Check for reservations during block alloc
1587 * @rbm: The current position in the resource group
1588 * @ip: The inode for which we are searching for blocks
1589 * @minext: The minimum extent length
1590 * @maxext: A pointer to the maximum extent structure
1592 * This checks the current position in the rgrp to see whether there is
1593 * a reservation covering this block. If not then this function is a
1594 * no-op. If there is, then the position is moved to the end of the
1595 * contiguous reservation(s) so that we are pointing at the first
1596 * non-reserved block.
1598 * Returns: 0 if no reservation, 1 if @rbm has changed, otherwise an error
1601 static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
1602 const struct gfs2_inode *ip,
1604 struct gfs2_extent *maxext)
1606 u64 block = gfs2_rbm_to_block(rbm);
1612 * If we have a minimum extent length, then skip over any extent
1613 * which is less than the min extent length in size.
1616 extlen = gfs2_free_extlen(rbm, minext);
1617 if (extlen <= maxext->len)
1622 * Check the extent which has been found against the reservations
1623 * and skip if parts of it are already reserved
1625 nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip);
1626 if (nblock == block) {
1627 if (!minext || extlen >= minext)
1630 if (extlen > maxext->len) {
1631 maxext->len = extlen;
1635 nblock = block + extlen;
1637 ret = gfs2_rbm_from_block(rbm, nblock);
1644 * gfs2_rbm_find - Look for blocks of a particular state
1645 * @rbm: Value/result starting position and final position
1646 * @state: The state which we want to find
1647 * @minext: Pointer to the requested extent length (NULL for a single block)
1648 * This is updated to be the actual reservation size.
1649 * @ip: If set, check for reservations
1650 * @nowrap: Stop looking at the end of the rgrp, rather than wrapping
1651 * around until we've reached the starting point.
1654 * - If looking for free blocks, we set GBF_FULL on each bitmap which
1655 * has no free blocks in it.
1656 * - If looking for free blocks, we set rd_extfail_pt on each rgrp which
1657 * has come up short on a free block search.
1659 * Returns: 0 on success, -ENOSPC if there is no block of the requested state
1662 static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
1663 const struct gfs2_inode *ip, bool nowrap)
1665 struct buffer_head *bh;
1668 int first_bii = rbm->bii;
1669 u32 first_offset = rbm->offset;
1673 int iters = rbm->rgd->rd_length;
1675 struct gfs2_bitmap *bi;
1676 struct gfs2_extent maxext = { .rbm.rgd = rbm->rgd, };
1678 /* If we are not starting at the beginning of a bitmap, then we
1679 * need to add one to the bitmap count to ensure that we search
1680 * the starting bitmap twice.
1682 if (rbm->offset != 0)
1687 if ((ip == NULL || !gfs2_rs_active(&ip->i_res)) &&
1688 test_bit(GBF_FULL, &bi->bi_flags) &&
1689 (state == GFS2_BLKST_FREE))
1693 buffer = bh->b_data + bi->bi_offset;
1694 WARN_ON(!buffer_uptodate(bh));
1695 if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
1696 buffer = bi->bi_clone + bi->bi_offset;
1697 initial_offset = rbm->offset;
1698 offset = gfs2_bitfit(buffer, bi->bi_len, rbm->offset, state);
1699 if (offset == BFITNOENT)
1701 rbm->offset = offset;
1705 initial_bii = rbm->bii;
1706 ret = gfs2_reservation_check_and_update(rbm, ip,
1707 minext ? *minext : 0,
1712 n += (rbm->bii - initial_bii);
1715 if (ret == -E2BIG) {
1718 n += (rbm->bii - initial_bii);
1719 goto res_covered_end_of_rgrp;
1723 bitmap_full: /* Mark bitmap as full and fall through */
1724 if ((state == GFS2_BLKST_FREE) && initial_offset == 0)
1725 set_bit(GBF_FULL, &bi->bi_flags);
1727 next_bitmap: /* Find next bitmap in the rgrp */
1730 if (rbm->bii == rbm->rgd->rd_length)
1732 res_covered_end_of_rgrp:
1733 if ((rbm->bii == 0) && nowrap)
1741 if (minext == NULL || state != GFS2_BLKST_FREE)
1744 /* If the extent was too small, and it's smaller than the smallest
1745 to have failed before, remember for future reference that it's
1746 useless to search this rgrp again for this amount or more. */
1747 if ((first_offset == 0) && (first_bii == 0) &&
1748 (*minext < rbm->rgd->rd_extfail_pt))
1749 rbm->rgd->rd_extfail_pt = *minext;
1751 /* If the maximum extent we found is big enough to fulfill the
1752 minimum requirements, use it anyway. */
1755 *minext = maxext.len;
1763 * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
1765 * @last_unlinked: block address of the last dinode we unlinked
1766 * @skip: block address we should explicitly not unlink
1768 * Returns: nothing; any unlinked, allocated but unused inodes that are
1769 * found are queued for disposal on the gfs2_delete_workqueue.
1772 static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
1775 struct gfs2_sbd *sdp = rgd->rd_sbd;
1776 struct gfs2_glock *gl;
1777 struct gfs2_inode *ip;
1780 struct gfs2_rbm rbm = { .rgd = rgd, .bii = 0, .offset = 0 };
1783 down_write(&sdp->sd_log_flush_lock);
1784 error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, NULL, NULL,
1786 up_write(&sdp->sd_log_flush_lock);
1787 if (error == -ENOSPC)
1789 if (WARN_ON_ONCE(error))
1792 block = gfs2_rbm_to_block(&rbm);
1793 if (gfs2_rbm_from_block(&rbm, block + 1))
1795 if (*last_unlinked != NO_BLOCK && block <= *last_unlinked)
1799 *last_unlinked = block;
1801 error = gfs2_glock_get(sdp, block, &gfs2_iopen_glops, CREATE, &gl);
1805 /* If the inode is already in cache, we can ignore it here
1806 * because the existing inode disposal code will deal with
1807 * it when all refs have gone away. Accessing gl_object like
1808 * this is not safe in general. Here it is ok because we do
1809 * not dereference the pointer, and we only need an approx
1810 * answer to whether it is NULL or not.
1814 if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
1819 /* Limit reclaim to sensible number of tasks */
1820 if (found > NR_CPUS)
1824 rgd->rd_flags &= ~GFS2_RDF_CHECK;
1829 * gfs2_rgrp_congested - Use stats to figure out whether an rgrp is congested
1830 * @rgd: The rgrp in question
1831 * @loops: An indication of how picky we can be (0=very, 1=less so)
1833 * This function uses the recently added glock statistics in order to
1834 * figure out whether a particular resource group is suffering from
1835 * contention from multiple nodes. This is done purely on the basis
1836 * of timings, since this is the only data we have to work with and
1837 * our aim here is to reject a resource group which is highly contended
1838 * but (very important) not to do this too often in order to ensure that
1839 * we do not end up introducing fragmentation by changing resource
1840 * groups when not actually required.
1842 * The calculation is fairly simple, we want to know whether the SRTTB
1843 * (i.e. smoothed round trip time for blocking operations) to acquire
1844 * the lock for this rgrp's glock is significantly greater than the
1845 * time taken for resource groups on average. We introduce a margin in
1846 * the form of the variable @var which is computed as the sum of the two
1847 * respective variances, and multiplied by a factor depending on @loops
1848 * and whether we have a lot of data to base the decision on. This is
1849 * then tested against the square difference of the means in order to
1850 * decide whether the result is statistically significant or not.
1852 * Returns: A boolean verdict on the congestion status
1855 static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops)
1857 const struct gfs2_glock *gl = rgd->rd_gl;
1858 const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1859 struct gfs2_lkstats *st;
1860 u64 r_dcount, l_dcount;
1861 u64 l_srttb, a_srttb = 0;
1865 int cpu, nonzero = 0;
1868 for_each_present_cpu(cpu) {
1869 st = &per_cpu_ptr(sdp->sd_lkstats, cpu)->lkstats[LM_TYPE_RGRP];
1870 if (st->stats[GFS2_LKS_SRTTB]) {
1871 a_srttb += st->stats[GFS2_LKS_SRTTB];
1875 st = &this_cpu_ptr(sdp->sd_lkstats)->lkstats[LM_TYPE_RGRP];
1877 do_div(a_srttb, nonzero);
1878 r_dcount = st->stats[GFS2_LKS_DCOUNT];
1879 var = st->stats[GFS2_LKS_SRTTVARB] +
1880 gl->gl_stats.stats[GFS2_LKS_SRTTVARB];
1883 l_srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB];
1884 l_dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT];
1886 if ((l_dcount < 1) || (r_dcount < 1) || (a_srttb == 0))
1889 srttb_diff = a_srttb - l_srttb;
1890 sqr_diff = srttb_diff * srttb_diff;
1893 if (l_dcount < 8 || r_dcount < 8)
1898 return ((srttb_diff < 0) && (sqr_diff > var));
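/*
 * Illustrative restatement of the verdict above: the rgrp is reported as
 * congested only when this glock's smoothed round-trip time is worse than
 * the average for rgrp glocks (srttb_diff < 0, since srttb_diff is
 * a_srttb - l_srttb) and the squared difference of the means exceeds the
 * combined variance term, which is inflated when samples are few or on an
 * early loop.
 */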
1902 * gfs2_rgrp_used_recently
1903 * @rs: The block reservation with the rgrp to test
1904 * @msecs: The time limit in milliseconds
1906 * Returns: True if the rgrp glock has been used within the time limit
1908 static bool gfs2_rgrp_used_recently(const struct gfs2_blkreserv *rs,
1913 tdiff = ktime_to_ns(ktime_sub(ktime_get_real(),
1914 rs->rs_rbm.rgd->rd_gl->gl_dstamp));
1916 return tdiff > (msecs * 1000 * 1000);
1919 static u32 gfs2_orlov_skip(const struct gfs2_inode *ip)
1921 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1924 get_random_bytes(&skip, sizeof(skip));
1925 return skip % sdp->sd_rgrps;
1928 static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin)
1930 struct gfs2_rgrpd *rgd = *pos;
1931 struct gfs2_sbd *sdp = rgd->rd_sbd;
1933 rgd = gfs2_rgrpd_get_next(rgd);
1935 rgd = gfs2_rgrpd_get_first(sdp);
1937 if (rgd != begin) /* If we didn't wrap */
1943 * fast_to_acquire - determine if a resource group will be fast to acquire
1945 * If this is one of our preferred rgrps, it should be quicker to acquire,
1946 * because we tried to set ourselves up as dlm lock master.
1948 static inline int fast_to_acquire(struct gfs2_rgrpd *rgd)
1950 struct gfs2_glock *gl = rgd->rd_gl;
1952 if (gl->gl_state != LM_ST_UNLOCKED && list_empty(&gl->gl_holders) &&
1953 !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
1954 !test_bit(GLF_DEMOTE, &gl->gl_flags))
1956 if (rgd->rd_flags & GFS2_RDF_PREFERRED)
1962 * gfs2_inplace_reserve - Reserve space in the filesystem
1963 * @ip: the inode to reserve space for
1964 * @ap: the allocation parameters
1966 * We try our best to find an rgrp that has at least ap->target blocks
1967 * available. After a couple of passes (loops == 2), the prospects of finding
1968 * such an rgrp diminish. At this stage, we return the first rgrp that has
1969 * at least ap->min_target blocks available. Either way, we set ap->allowed to
1970 * the number of blocks available in the chosen rgrp.
1972 * Returns: 0 on success,
1973 * -ENOMEM if a suitable rgrp can't be found
1977 int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
1979 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1980 struct gfs2_rgrpd *begin = NULL;
1981 struct gfs2_blkreserv *rs = &ip->i_res;
1982 int error = 0, rg_locked, flags = 0;
1983 u64 last_unlinked = NO_BLOCK;
1987 if (sdp->sd_args.ar_rgrplvb)
1989 if (gfs2_assert_warn(sdp, ap->target))
1991 if (gfs2_rs_active(rs)) {
1992 begin = rs->rs_rbm.rgd;
1993 } else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
1994 rs->rs_rbm.rgd = begin = ip->i_rgd;
1996 check_and_update_goal(ip);
1997 rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
1999 if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV))
2000 skip = gfs2_orlov_skip(ip);
2001 if (rs->rs_rbm.rgd == NULL)
2007 if (!gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) {
2011 if (!gfs2_rs_active(rs)) {
2013 !fast_to_acquire(rs->rs_rbm.rgd))
2016 gfs2_rgrp_used_recently(rs, 1000) &&
2017 gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
2020 error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
2021 LM_ST_EXCLUSIVE, flags,
2023 if (unlikely(error))
2025 if (!gfs2_rs_active(rs) && (loops < 2) &&
2026 gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
2028 if (sdp->sd_args.ar_rgrplvb) {
2029 error = update_rgrp_lvb(rs->rs_rbm.rgd);
2030 if (unlikely(error)) {
2031 gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
2037 /* Skip unusable resource groups */
2038 if ((rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC |
2040 (loops == 0 && ap->target > rs->rs_rbm.rgd->rd_extfail_pt))
2043 if (sdp->sd_args.ar_rgrplvb)
2044 gfs2_rgrp_bh_get(rs->rs_rbm.rgd);
2046 /* Get a reservation if we don't already have one */
2047 if (!gfs2_rs_active(rs))
2048 rg_mblk_search(rs->rs_rbm.rgd, ip, ap);
2050 /* Skip rgrps when we can't get a reservation on first pass */
2051 if (!gfs2_rs_active(rs) && (loops < 1))
2054 /* If rgrp has enough free space, use it */
2055 if (rs->rs_rbm.rgd->rd_free_clone >= ap->target ||
2056 (loops == 2 && ap->min_target &&
2057 rs->rs_rbm.rgd->rd_free_clone >= ap->min_target)) {
2058 ip->i_rgd = rs->rs_rbm.rgd;
2059 ap->allowed = ip->i_rgd->rd_free_clone;
2063 /* Check for unlinked inodes which can be reclaimed */
2064 if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK)
2065 try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked,
2068 /* Drop reservation, if we couldn't use reserved rgrp */
2069 if (gfs2_rs_active(rs))
2070 gfs2_rs_deltree(rs);
2072 /* Unlock rgrp if required */
2074 gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
2076 /* Find the next rgrp, and continue looking */
2077 if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin))
2082 /* If we've scanned all the rgrps, but found no free blocks
2083 * then this checks for some less likely conditions before
2087 /* Check that fs hasn't grown if writing to rindex */
2088 if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) {
2089 error = gfs2_ri_update(ip);
2093 /* Flushing the log may release space */
2095 gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
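/*
 * Rough summary of the loop policy (illustrative): pass 0 only uses an rgrp
 * in which a multi-block reservation could be made, pass 1 also accepts
 * rgrps without a reservation, and pass 2 additionally accepts any rgrp with
 * at least ap->min_target free blocks; flushing the log between passes may
 * release further space.
 */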
2102 * gfs2_inplace_release - release an inplace reservation
2103 * @ip: the inode the reservation was taken out on
2105 * Release a reservation made by gfs2_inplace_reserve().
2108 void gfs2_inplace_release(struct gfs2_inode *ip)
2110 struct gfs2_blkreserv *rs = &ip->i_res;
2112 if (gfs2_holder_initialized(&rs->rs_rgd_gh))
2113 gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
2117 * gfs2_get_block_type - Determine the type of a block in an RG
2118 * @rgd: the resource group holding the block
2119 * @block: the block number
2121 * Returns: The block type (GFS2_BLKST_*)
2124 static unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
2126 struct gfs2_rbm rbm = { .rgd = rgd, };
2129 ret = gfs2_rbm_from_block(&rbm, block);
2130 WARN_ON_ONCE(ret != 0);
2132 return gfs2_testbit(&rbm);
2137 * gfs2_alloc_extent - allocate an extent from a given bitmap
2138 * @rbm: the resource group information
2139 * @dinode: TRUE if the first block we allocate is for a dinode
2140 * @n: The extent length (value/result)
2142 * Add the bitmap buffer to the transaction.
2143 * Set the found bits to the new state to change the block's allocation state.
2145 static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
2148 struct gfs2_rbm pos = { .rgd = rbm->rgd, };
2149 const unsigned int elen = *n;
2154 block = gfs2_rbm_to_block(rbm);
2155 gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm_bi(rbm)->bi_bh);
2156 gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
2159 ret = gfs2_rbm_from_block(&pos, block);
2160 if (ret || gfs2_testbit(&pos) != GFS2_BLKST_FREE)
2162 gfs2_trans_add_meta(pos.rgd->rd_gl, rbm_bi(&pos)->bi_bh);
2163 gfs2_setbit(&pos, true, GFS2_BLKST_USED);
2170 * rgblk_free - Change alloc state of given block(s)
2171 * @sdp: the filesystem
2172 * @bstart: the start of a run of blocks to free
2173 * @blen: the length of the block run (all must lie within ONE RG!)
2174 * @new_state: GFS2_BLKST_XXX the after-allocation block state
2176 * Returns: Resource group containing the block(s)
2179 static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
2180 u32 blen, unsigned char new_state)
2182 struct gfs2_rbm rbm;
2183 struct gfs2_bitmap *bi, *bi_prev = NULL;
2185 rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
2187 if (gfs2_consist(sdp))
2188 fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
2192 gfs2_rbm_from_block(&rbm, bstart);
2195 if (bi != bi_prev) {
2196 if (!bi->bi_clone) {
2197 bi->bi_clone = kmalloc(bi->bi_bh->b_size,
2198 GFP_NOFS | __GFP_NOFAIL);
2199 memcpy(bi->bi_clone + bi->bi_offset,
2200 bi->bi_bh->b_data + bi->bi_offset,
2203 gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
2206 gfs2_setbit(&rbm, false, new_state);
2207 gfs2_rbm_incr(&rbm);
2214 * gfs2_rgrp_dump - print out an rgrp
2215 * @seq: The iterator
2216 * @gl: The glock in question
2220 void gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl)
2222 struct gfs2_rgrpd *rgd = gl->gl_object;
2223 struct gfs2_blkreserv *trs;
2224 const struct rb_node *n;
2228 gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u r:%u e:%u\n",
2229 (unsigned long long)rgd->rd_addr, rgd->rd_flags,
2230 rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
2231 rgd->rd_reserved, rgd->rd_extfail_pt);
2232 spin_lock(&rgd->rd_rsspin);
2233 for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
2234 trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
2237 spin_unlock(&rgd->rd_rsspin);
2240 static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
2242 struct gfs2_sbd *sdp = rgd->rd_sbd;
2243 fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n",
2244 (unsigned long long)rgd->rd_addr);
2245 fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n");
2246 gfs2_rgrp_dump(NULL, rgd->rd_gl);
2247 rgd->rd_flags |= GFS2_RDF_ERROR;
2251 * gfs2_adjust_reservation - Adjust (or remove) a reservation after allocation
2252 * @ip: The inode we have just allocated blocks for
2253 * @rbm: The start of the allocated blocks
2254 * @len: The extent length
2256 * Adjusts a reservation after an allocation has taken place. If the
2257 * reservation does not match the allocation, or if it is now empty
2258 * then it is removed.
2261 static void gfs2_adjust_reservation(struct gfs2_inode *ip,
2262 const struct gfs2_rbm *rbm, unsigned len)
2264 struct gfs2_blkreserv *rs = &ip->i_res;
2265 struct gfs2_rgrpd *rgd = rbm->rgd;
2270 spin_lock(&rgd->rd_rsspin);
2271 if (gfs2_rs_active(rs)) {
2272 if (gfs2_rbm_eq(&rs->rs_rbm, rbm)) {
2273 block = gfs2_rbm_to_block(rbm);
2274 ret = gfs2_rbm_from_block(&rs->rs_rbm, block + len);
2275 rlen = min(rs->rs_free, len);
2276 rs->rs_free -= rlen;
2277 rgd->rd_reserved -= rlen;
2278 trace_gfs2_rs(rs, TRACE_RS_CLAIM);
2279 if (rs->rs_free && !ret)
2281 /* We used up our block reservation, so we should
2282 reserve more blocks next time. */
2283 atomic_add(RGRP_RSRV_ADDBLKS, &rs->rs_sizehint);
2288 spin_unlock(&rgd->rd_rsspin);
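/*
 * For example (illustrative): if a 32-block reservation was active and the
 * allocation claimed 10 blocks from its start, rs_free and rd_reserved both
 * drop by 10 and the reservation's rbm is advanced past the allocated
 * extent; had the whole reservation been consumed, it would be removed.
 */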
2292 * gfs2_set_alloc_start - Set starting point for block allocation
2293 * @rbm: The rbm which will be set to the required location
2294 * @ip: The gfs2 inode
2295 * @dinode: Flag to say if allocation includes a new inode
2297 * This sets the starting point from the reservation if one is active
2298 * otherwise it falls back to guessing a start point based on the
2299 * inode's goal block or the last allocation point in the rgrp.
2302 static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
2303 const struct gfs2_inode *ip, bool dinode)
2307 if (gfs2_rs_active(&ip->i_res)) {
2308 *rbm = ip->i_res.rs_rbm;
2312 if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal))
2315 goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;
2317 gfs2_rbm_from_block(rbm, goal);
2321 * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
2322 * @ip: the inode to allocate the block for
2323 * @bn: Used to return the starting block number
2324 * @nblocks: requested number of blocks/extent length (value/result)
2325 * @dinode: 1 if we're allocating a dinode block, else 0
2326 * @generation: the generation number of the inode
2328 * Returns: 0 or error
2331 int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
2332 bool dinode, u64 *generation)
2334 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2335 struct buffer_head *dibh;
2336 struct gfs2_rbm rbm = { .rgd = ip->i_rgd, };
2338 u64 block; /* block, within the file system scope */
2341 gfs2_set_alloc_start(&rbm, ip, dinode);
2342 error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, ip, false);
2344 if (error == -ENOSPC) {
2345 gfs2_set_alloc_start(&rbm, ip, dinode);
2346 error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, NULL, false);
2349 /* Since all blocks are reserved in advance, this shouldn't happen */
2351 fs_warn(sdp, "inum=%llu error=%d, nblocks=%u, full=%d fail_pt=%d\n",
2352 (unsigned long long)ip->i_no_addr, error, *nblocks,
2353 test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags),
2354 rbm.rgd->rd_extfail_pt);
2358 gfs2_alloc_extent(&rbm, dinode, nblocks);
2359 block = gfs2_rbm_to_block(&rbm);
2360 rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0;
2361 if (gfs2_rs_active(&ip->i_res))
2362 gfs2_adjust_reservation(ip, &rbm, *nblocks);
2368 ip->i_goal = block + ndata - 1;
2369 error = gfs2_meta_inode_buffer(ip, &dibh);
2371 struct gfs2_dinode *di =
2372 (struct gfs2_dinode *)dibh->b_data;
2373 gfs2_trans_add_meta(ip->i_gl, dibh);
2374 di->di_goal_meta = di->di_goal_data =
2375 cpu_to_be64(ip->i_goal);
2379 if (rbm.rgd->rd_free < *nblocks) {
2380 pr_warn("nblocks=%u\n", *nblocks);
2384 rbm.rgd->rd_free -= *nblocks;
2386 rbm.rgd->rd_dinodes++;
2387 *generation = rbm.rgd->rd_igeneration++;
2388 if (*generation == 0)
2389 *generation = rbm.rgd->rd_igeneration++;
2392 gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh);
2393 gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
2394 gfs2_rgrp_ondisk2lvb(rbm.rgd->rd_rgl, rbm.rgd->rd_bits[0].bi_bh->b_data);
2396 gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
2398 gfs2_trans_add_unrevoke(sdp, block, *nblocks);
2400 gfs2_quota_change(ip, *nblocks, ip->i_inode.i_uid, ip->i_inode.i_gid);
2402 rbm.rgd->rd_free_clone -= *nblocks;
2403 trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
2404 dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
2409 gfs2_rgrp_error(rbm.rgd);
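/*
 * Illustrative sketch (not part of the original code): a minimal caller of
 * gfs2_alloc_blocks() for a single ordinary block.  It assumes the caller
 * has already set up the usual context (quota check, gfs2_inplace_reserve()
 * and an open transaction), as the real callers do; the helper name is
 * hypothetical.
 *
 *	static int example_alloc_one_block(struct gfs2_inode *ip, u64 *blkno)
 *	{
 *		unsigned int n = 1;
 *
 *		// dinode == false, so no generation number is needed
 *		return gfs2_alloc_blocks(ip, blkno, &n, false, NULL);
 *	}
 *
 * On success, *blkno holds the allocated block number; when more than one
 * block is requested, n is updated to the extent length actually allocated.
 */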
2414 * __gfs2_free_blocks - free a contiguous run of block(s)
2415 * @ip: the inode these blocks are being freed from
2416 * @bstart: first block of a run of contiguous blocks
2417 * @blen: the length of the block run
2418 * @meta: 1 if the blocks represent metadata
2422 void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
2424 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2425 struct gfs2_rgrpd *rgd;
2427 rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
2430 trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
2431 rgd->rd_free += blen;
2432 rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
2433 gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
2434 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
2435 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
2437 /* Directories keep their data in the metadata address space */
2438 if (meta || ip->i_depth)
2439 gfs2_meta_wipe(ip, bstart, blen);
2443 * gfs2_free_meta - free a contiguous run of metadata block(s)
2444 * @ip: the inode these blocks are being freed from
2445 * @bstart: first block of a run of contiguous blocks
2446 * @blen: the length of the block run
2450 void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
2452 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2454 __gfs2_free_blocks(ip, bstart, blen, 1);
2455 gfs2_statfs_change(sdp, 0, +blen, 0);
2456 gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
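/*
 * Illustrative sketch (not part of the original code): the meta argument to
 * __gfs2_free_blocks() decides whether the freed range must also be wiped
 * from the journalled metadata address space.  Hypothetical example calls:
 *
 *	__gfs2_free_blocks(ip, bstart, blen, 0);   // ordinary file data
 *	gfs2_free_meta(ip, bstart, blen);          // indirect blocks etc.
 *
 * gfs2_free_meta(), shown above, always frees with meta == 1 and also
 * credits the freed blocks back to statfs and the owner's quota.
 */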
2459 void gfs2_unlink_di(struct inode *inode)
2461 struct gfs2_inode *ip = GFS2_I(inode);
2462 struct gfs2_sbd *sdp = GFS2_SB(inode);
2463 struct gfs2_rgrpd *rgd;
2464 u64 blkno = ip->i_no_addr;
2466 rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
2469 trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
2470 gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
2471 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
2472 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
2473 update_rgrp_lvb_unlinked(rgd, 1);
2476 static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
2478 struct gfs2_sbd *sdp = rgd->rd_sbd;
2479 struct gfs2_rgrpd *tmp_rgd;
2481 tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
2484 gfs2_assert_withdraw(sdp, rgd == tmp_rgd);
2486 if (!rgd->rd_dinodes)
2487 gfs2_consist_rgrpd(rgd);
2491 gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
2492 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
2493 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
2494 update_rgrp_lvb_unlinked(rgd, -1);
2496 gfs2_statfs_change(sdp, 0, +1, -1);
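/*
 * Illustrative note (not part of the original code): the statfs delta above
 * records that freeing a dinode returns one block to the free pool (+1) and
 * drops the dinode count by one (-1), the mirror image of the adjustment
 * gfs2_alloc_blocks() makes when it allocates with dinode set.
 */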
2500 void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
2502 gfs2_free_uninit_di(rgd, ip->i_no_addr);
2503 trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
2504 gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
2505 gfs2_meta_wipe(ip, ip->i_no_addr, 1);
2509 * gfs2_check_blk_type - Check the type of a block
2510 * @sdp: The superblock
2511 * @no_addr: The block number to check
2512 * @type: The block type we are looking for
2514 * Returns: 0 if the block type matches the expected type
2515 * -ESTALE if it doesn't match
2516 * or -ve errno if something went wrong while checking
2519 int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
2521 struct gfs2_rgrpd *rgd;
2522 struct gfs2_holder rgd_gh;
2523 int error = -EINVAL;
2525 rgd = gfs2_blk2rgrpd(sdp, no_addr, 1);
2529 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh);
2533 if (gfs2_get_block_type(rgd, no_addr) != type)
2536 gfs2_glock_dq_uninit(&rgd_gh);
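/*
 * Illustrative sketch (not part of the original code): one plausible use of
 * gfs2_check_blk_type() is to confirm that a block still holds an unlinked
 * dinode before trying to reclaim it:
 *
 *	error = gfs2_check_blk_type(sdp, no_addr, GFS2_BLKST_UNLINKED);
 *	if (error)	// -ESTALE means another node already dealt with it
 *		return error;
 *
 * GFS2_BLKST_UNLINKED is the on-disk bitmap state defined in gfs2_ondisk.h.
 */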
2542 * gfs2_rlist_add - add a RG to a list of RGs
2544 * @rlist: the list of resource groups
2547 * Figure out what RG a block belongs to and add that RG to the list
2549 * FIXME: Don't use NOFAIL
2553 void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
2556 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2557 struct gfs2_rgrpd *rgd;
2558 struct gfs2_rgrpd **tmp;
2559 unsigned int new_space;
2562 if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
2565 if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, block))
2568 rgd = gfs2_blk2rgrpd(sdp, block, 1);
2570 fs_err(sdp, "rlist_add: no rgrp for block %llu\n", (unsigned long long)block);
2575 for (x = 0; x < rlist->rl_rgrps; x++)
2576 if (rlist->rl_rgd[x] == rgd)
2579 if (rlist->rl_rgrps == rlist->rl_space) {
2580 new_space = rlist->rl_space + 10;
2582 tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
2583 GFP_NOFS | __GFP_NOFAIL);
2585 if (rlist->rl_rgd) {
2586 memcpy(tmp, rlist->rl_rgd,
2587 rlist->rl_space * sizeof(struct gfs2_rgrpd *));
2588 kfree(rlist->rl_rgd);
2591 rlist->rl_space = new_space;
2592 rlist->rl_rgd = tmp;
2595 rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
2599 * gfs2_rlist_alloc - allocate and initialize an array of glock holders,
2600 * one for each RG that has been added to the rlist
2601 * @rlist: the list of resource groups
2602 * @state: the lock state to acquire the RG lock in
2604 * FIXME: Don't use NOFAIL
2608 void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
2612 rlist->rl_ghs = kmalloc(rlist->rl_rgrps * sizeof(struct gfs2_holder),
2613 GFP_NOFS | __GFP_NOFAIL);
2614 for (x = 0; x < rlist->rl_rgrps; x++)
2615 gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
2621 * gfs2_rlist_free - free a resource group list
2622 * @rlist: the list of resource groups
2626 void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
2630 kfree(rlist->rl_rgd);
2632 if (rlist->rl_ghs) {
2633 for (x = 0; x < rlist->rl_rgrps; x++)
2634 gfs2_holder_uninit(&rlist->rl_ghs[x]);
2635 kfree(rlist->rl_ghs);
2636 rlist->rl_ghs = NULL;
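/*
 * Illustrative sketch (not part of the original code): the usual life cycle
 * of a resource group list built with the three helpers above.  It assumes
 * the bulk glock helpers gfs2_glock_nq_m()/gfs2_glock_dq_m() from glock.c;
 * the variable names are hypothetical.
 *
 *	struct gfs2_rgrp_list rlist = { .rl_rgrps = 0 };
 *	int error;
 *
 *	// collect the rgrp for every block we intend to deallocate
 *	gfs2_rlist_add(ip, &rlist, block);
 *	// ... repeat for further blocks ...
 *
 *	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
 *	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
 *	if (!error) {
 *		// ... free the blocks while holding the rgrp glocks ...
 *		gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
 *	}
 *	gfs2_rlist_free(&rlist);
 */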