/*
 * Copyright (c) 2014 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_error.h"
#include "xfs_extent_busy.h"
#include "xfs_ag_resv.h"
/*
 * Reverse map btree.
 *
 * This is a per-ag tree used to track the owner(s) of a given extent. With
 * reflink it is possible for there to be multiple owners, which is a departure
 * from classic XFS. Owner records for data extents are inserted when the
 * extent is mapped and removed when an extent is unmapped. Owner records for
 * all other block types (i.e. metadata) are inserted when an extent is
 * allocated and removed when an extent is freed. There can only be one owner
 * of a metadata extent, usually an inode or some other metadata structure like
 * an AG btree.
 *
 * The rmap btree is part of the free space management, so blocks for the tree
 * are sourced from the agfl. Hence we need transaction reservation support for
 * this tree so that the freelist is always large enough. This also impacts on
 * the minimum space we need to leave free in the AG.
 *
 * The tree is ordered by [ag block, owner, offset]. This is a large key size,
 * but it is the only way to enforce unique keys when a block can be owned by
 * multiple files at any offset. There's no need to order/search by extent
 * size for online updating/management of the tree. It is intended that most
 * reverse lookups will be to find the owner(s) of a particular block, or to
 * try to recover tree and file data from corrupt primary metadata.
 */
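
/*
 * Illustrative example (not part of the original source): because keys
 * compare as [ag block, owner, offset], a record for agbno 100 owned by
 * inode 131 sorts before a record for agbno 100 owned by inode 132, and
 * both sort before any record for agbno 101, regardless of owner. Two
 * reflinked files can therefore each carry a record for the same agbno
 * without violating key uniqueness.
 */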
static struct xfs_btree_cur *
xfs_rmapbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_rmapbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agbp, cur->bc_private.a.agno);
}
STATIC void
xfs_rmapbt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	int			btnum = cur->bc_btnum;
	struct xfs_perag	*pag = xfs_perag_get(cur->bc_mp, seqno);

	ASSERT(ptr->s != 0);

	agf->agf_roots[btnum] = ptr->s;
	be32_add_cpu(&agf->agf_levels[btnum], inc);
	pag->pagf_levels[btnum] += inc;
	xfs_perag_put(pag);

	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}
STATIC int
xfs_rmapbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	int			error;
	xfs_agblock_t		bno;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);

	/* Allocate the new block from the freelist. If we can't, give up. */
	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
				       &bno, 1);
	if (error) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
		return error;
	}
	trace_xfs_rmapbt_alloc_block(cur->bc_mp, cur->bc_private.a.agno,
			bno, 1);
	if (bno == NULLAGBLOCK) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}
	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1,
			false);
	xfs_trans_agbtree_delta(cur->bc_tp, 1);
	new->s = cpu_to_be32(bno);
	be32_add_cpu(&agf->agf_rmap_blocks, 1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;
}
STATIC int
xfs_rmapbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agblock_t		bno;
	int			error;

	bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
	trace_xfs_rmapbt_free_block(cur->bc_mp, cur->bc_private.a.agno,
			bno, 1);
	be32_add_cpu(&agf->agf_rmap_blocks, -1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
	error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
	if (error)
		return error;

	xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
			      XFS_EXTENT_BUSY_SKIP_DISCARD);
	xfs_trans_agbtree_delta(cur->bc_tp, -1);
	return 0;
}
STATIC int
xfs_rmapbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_rmap_mnr[level != 0];
}

STATIC int
xfs_rmapbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_rmap_mxr[level != 0];
}
STATIC void
xfs_rmapbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->rmap.rm_startblock = rec->rmap.rm_startblock;
	key->rmap.rm_owner = rec->rmap.rm_owner;
	key->rmap.rm_offset = rec->rmap.rm_offset;
}
/*
 * The high key for a reverse mapping record can be computed by shifting
 * the startblock and offset to the highest value that would still map
 * to that record. In practice this means that we add blockcount-1 to
 * the startblock for all records, and if the record is for a data/attr
 * fork mapping, we add blockcount-1 to the offset too.
 */
STATIC void
xfs_rmapbt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	uint64_t		off;
	int			adj;

	adj = be32_to_cpu(rec->rmap.rm_blockcount) - 1;

	key->rmap.rm_startblock = rec->rmap.rm_startblock;
	be32_add_cpu(&key->rmap.rm_startblock, adj);
	key->rmap.rm_owner = rec->rmap.rm_owner;
	key->rmap.rm_offset = rec->rmap.rm_offset;
	if (XFS_RMAP_NON_INODE_OWNER(be64_to_cpu(rec->rmap.rm_owner)) ||
	    XFS_RMAP_IS_BMBT_BLOCK(be64_to_cpu(rec->rmap.rm_offset)))
		return;
	off = be64_to_cpu(key->rmap.rm_offset);
	off = (XFS_RMAP_OFF(off) + adj) | (off & ~XFS_RMAP_OFF_MASK);
	key->rmap.rm_offset = cpu_to_be64(off);
}
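
/*
 * Worked example (illustrative, not from the original source): a data
 * fork record with rm_startblock 64, rm_blockcount 8 and rm_offset 16
 * yields adj = 7, so the high key becomes [startblock 71, owner,
 * offset 23]. For a non-inode owner such as XFS_RMAP_OWN_AG, or for a
 * bmbt block, the offset field carries no file offset, so it is left
 * unadjusted by the early return above.
 */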
STATIC void
xfs_rmapbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->rmap.rm_startblock = cpu_to_be32(cur->bc_rec.r.rm_startblock);
	rec->rmap.rm_blockcount = cpu_to_be32(cur->bc_rec.r.rm_blockcount);
	rec->rmap.rm_owner = cpu_to_be64(cur->bc_rec.r.rm_owner);
	rec->rmap.rm_offset = cpu_to_be64(
			xfs_rmap_irec_offset_pack(&cur->bc_rec.r));
}
STATIC void
xfs_rmapbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
	ASSERT(agf->agf_roots[cur->bc_btnum] != 0);

	ptr->s = agf->agf_roots[cur->bc_btnum];
}
STATIC int64_t
xfs_rmapbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	struct xfs_rmap_irec	*rec = &cur->bc_rec.r;
	struct xfs_rmap_key	*kp = &key->rmap;
	__u64			x, y;
	int64_t			d;

	d = (int64_t)be32_to_cpu(kp->rm_startblock) - rec->rm_startblock;
	if (d)
		return d;

	x = be64_to_cpu(kp->rm_owner);
	y = rec->rm_owner;
	if (x > y)
		return 1;
	else if (y > x)
		return -1;

	x = XFS_RMAP_OFF(be64_to_cpu(kp->rm_offset));
	y = rec->rm_offset;
	if (x > y)
		return 1;
	else if (y > x)
		return -1;
	return 0;
}
STATIC int64_t
xfs_rmapbt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	struct xfs_rmap_key	*kp1 = &k1->rmap;
	struct xfs_rmap_key	*kp2 = &k2->rmap;
	int64_t			d;
	__u64			x, y;

	d = (int64_t)be32_to_cpu(kp1->rm_startblock) -
		       be32_to_cpu(kp2->rm_startblock);
	if (d)
		return d;

	x = be64_to_cpu(kp1->rm_owner);
	y = be64_to_cpu(kp2->rm_owner);
	if (x > y)
		return 1;
	else if (y > x)
		return -1;

	x = XFS_RMAP_OFF(be64_to_cpu(kp1->rm_offset));
	y = XFS_RMAP_OFF(be64_to_cpu(kp2->rm_offset));
	if (x > y)
		return 1;
	else if (y > x)
		return -1;
	return 0;
}
static bool
xfs_rmapbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	unsigned int		level;

	/*
	 * magic number and level verification
	 *
	 * During growfs operations, we can't verify the exact level or owner as
	 * the perag is not fully initialised and hence not attached to the
	 * buffer. In this case, check against the maximum tree depth.
	 *
	 * Similarly, during log recovery we will have a perag structure
	 * attached, but the agf information will not yet have been initialised
	 * from the on disk AGF. Again, we can only check against maximum limits
	 * in this case.
	 */
	if (block->bb_magic != cpu_to_be32(XFS_RMAP_CRC_MAGIC))
		return false;

	if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
		return false;
	if (!xfs_btree_sblock_v5hdr_verify(bp))
		return false;

	level = be16_to_cpu(block->bb_level);
	if (pag && pag->pagf_init) {
		if (level >= pag->pagf_levels[XFS_BTNUM_RMAPi])
			return false;
	} else if (level >= mp->m_rmap_maxlevels)
		return false;

	return xfs_btree_sblock_verify(bp, mp->m_rmap_mxr[level != 0]);
}
static void
xfs_rmapbt_read_verify(
	struct xfs_buf	*bp)
{
	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_buf_ioerror(bp, -EFSBADCRC);
	else if (!xfs_rmapbt_verify(bp))
		xfs_buf_ioerror(bp, -EFSCORRUPTED);

	if (bp->b_error) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp);
	}
}
static void
xfs_rmapbt_write_verify(
	struct xfs_buf	*bp)
{
	if (!xfs_rmapbt_verify(bp)) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_buf_ioerror(bp, -EFSCORRUPTED);
		xfs_verifier_error(bp);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}
const struct xfs_buf_ops xfs_rmapbt_buf_ops = {
	.name			= "xfs_rmapbt",
	.verify_read		= xfs_rmapbt_read_verify,
	.verify_write		= xfs_rmapbt_write_verify,
};
STATIC int
xfs_rmapbt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	uint32_t		x;
	uint32_t		y;
	uint64_t		a;
	uint64_t		b;

	x = be32_to_cpu(k1->rmap.rm_startblock);
	y = be32_to_cpu(k2->rmap.rm_startblock);
	if (x < y)
		return 1;
	else if (x > y)
		return 0;
	a = be64_to_cpu(k1->rmap.rm_owner);
	b = be64_to_cpu(k2->rmap.rm_owner);
	if (a < b)
		return 1;
	else if (a > b)
		return 0;
	a = XFS_RMAP_OFF(be64_to_cpu(k1->rmap.rm_offset));
	b = XFS_RMAP_OFF(be64_to_cpu(k2->rmap.rm_offset));
	if (a <= b)
		return 1;
	return 0;
}
STATIC int
xfs_rmapbt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	uint32_t		x;
	uint32_t		y;
	uint64_t		a;
	uint64_t		b;

	x = be32_to_cpu(r1->rmap.rm_startblock);
	y = be32_to_cpu(r2->rmap.rm_startblock);
	if (x < y)
		return 1;
	else if (x > y)
		return 0;
	a = be64_to_cpu(r1->rmap.rm_owner);
	b = be64_to_cpu(r2->rmap.rm_owner);
	if (a < b)
		return 1;
	else if (a > b)
		return 0;
	a = XFS_RMAP_OFF(be64_to_cpu(r1->rmap.rm_offset));
	b = XFS_RMAP_OFF(be64_to_cpu(r2->rmap.rm_offset));
	if (a <= b)
		return 1;
	return 0;
}
static const struct xfs_btree_ops xfs_rmapbt_ops = {
	.rec_len		= sizeof(struct xfs_rmap_rec),
	.key_len		= 2 * sizeof(struct xfs_rmap_key),

	.dup_cursor		= xfs_rmapbt_dup_cursor,
	.set_root		= xfs_rmapbt_set_root,
	.alloc_block		= xfs_rmapbt_alloc_block,
	.free_block		= xfs_rmapbt_free_block,
	.get_minrecs		= xfs_rmapbt_get_minrecs,
	.get_maxrecs		= xfs_rmapbt_get_maxrecs,
	.init_key_from_rec	= xfs_rmapbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_rmapbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_rmapbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_rmapbt_init_ptr_from_cur,
	.key_diff		= xfs_rmapbt_key_diff,
	.buf_ops		= &xfs_rmapbt_buf_ops,
	.diff_two_keys		= xfs_rmapbt_diff_two_keys,
	.keys_inorder		= xfs_rmapbt_keys_inorder,
	.recs_inorder		= xfs_rmapbt_recs_inorder,
};
/*
 * Allocate a new reverse mapping btree cursor.
 */
struct xfs_btree_cur *
xfs_rmapbt_init_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agnumber_t		agno)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_btree_cur	*cur;

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);
	cur->bc_tp = tp;
	cur->bc_mp = mp;
	/* Overlapping btree; 2 keys per pointer. */
	cur->bc_btnum = XFS_BTNUM_RMAP;
	cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;
	cur->bc_ops = &xfs_rmapbt_ops;
	cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;

	return cur;
}
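
/*
 * Illustrative usage sketch (not from the original source; error
 * handling elided): callers pair this with xfs_btree_del_cursor(), e.g.
 *
 *	cur = xfs_rmapbt_init_cursor(mp, tp, agbp, agno);
 *	error = xfs_rmap_lookup_le(cur, bno, len, owner, offset, flags,
 *			&stat);
 *	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR :
 *			XFS_BTREE_NOERROR);
 */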
/*
 * Calculate number of records in an rmap btree block.
 */
int
xfs_rmapbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_RMAP_BLOCK_LEN;

	if (leaf)
		return blocklen / sizeof(struct xfs_rmap_rec);
	return blocklen /
		(2 * sizeof(struct xfs_rmap_key) + sizeof(xfs_rmap_ptr_t));
}
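
/*
 * Worked example (illustrative; assumes 4096-byte blocks and a 56-byte
 * XFS_RMAP_BLOCK_LEN header): blocklen becomes 4040 bytes, so a leaf
 * holds 4040 / 24 = 168 records, while a node holds 4040 / (2 * 20 + 4)
 * = 91 key/pointer sets, since each node entry carries both a low and a
 * high key for overlapped-interval searching.
 */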
/* Compute the maximum height of an rmap btree. */
void
xfs_rmapbt_compute_maxlevels(
	struct xfs_mount		*mp)
{
	/*
	 * On a non-reflink filesystem, the maximum number of rmap
	 * records is the number of blocks in the AG, hence the max
	 * rmapbt height is log_$maxrecs($agblocks).  However, with
	 * reflink each AG block can have up to 2^32 (per the refcount
	 * record format) owners, which means that theoretically we
	 * could face up to 2^64 rmap records.
	 *
	 * That effectively means that the max rmapbt height must be
	 * XFS_BTREE_MAXLEVELS.  "Fortunately" we'll run out of AG
	 * blocks to feed the rmapbt long before the rmapbt reaches
	 * maximum height.  The reflink code uses ag_resv_critical to
	 * disallow reflinking when less than 10% of the per-AG metadata
	 * block reservation remains, since the fallback is a regular
	 * file copy.
	 */
	if (xfs_sb_version_hasreflink(&mp->m_sb))
		mp->m_rmap_maxlevels = XFS_BTREE_MAXLEVELS;
	else
		mp->m_rmap_maxlevels = xfs_btree_compute_maxlevels(mp,
				mp->m_rmap_mnr, mp->m_sb.sb_agblocks);
}
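
/*
 * Worked example (illustrative; assumes 4096-byte blocks, hence minrecs
 * of 84 for leaves and 45 for nodes): an AG of 2^20 blocks needs at
 * most ceil(2^20 / 84) = 12483 leaves, 278 level-1 nodes, 7 level-2
 * nodes and a single root, i.e. a height of 4. The reflink case skips
 * this math entirely and pins the height at XFS_BTREE_MAXLEVELS.
 */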
/* Calculate the rmap btree size for some records. */
xfs_extlen_t
xfs_rmapbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp, mp->m_rmap_mnr, len);
}
/*
 * Calculate the maximum rmap btree size.
 */
xfs_extlen_t
xfs_rmapbt_max_size(
	struct xfs_mount	*mp,
	xfs_agblock_t		agblocks)
{
	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (mp->m_rmap_mxr[0] == 0)
		return 0;

	return xfs_rmapbt_calc_size(mp, agblocks);
}
/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_rmapbt_calc_reserves(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_extlen_t		*ask,
	xfs_extlen_t		*used)
{
	struct xfs_buf		*agbp;
	struct xfs_agf		*agf;
	xfs_agblock_t		agblocks;
	xfs_extlen_t		tree_len;
	int			error;

	if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
		return 0;

	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
	if (error)
		return error;

	agf = XFS_BUF_TO_AGF(agbp);
	agblocks = be32_to_cpu(agf->agf_length);
	tree_len = be32_to_cpu(agf->agf_rmap_blocks);
	xfs_buf_relse(agbp);

	/* Reserve 1% of the AG or enough for 1 block per record. */
	*ask += max(agblocks / 100, xfs_rmapbt_max_size(mp, agblocks));
	*used += tree_len;

	return error;
}
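
/*
 * Worked example (illustrative; assumes the maxrecs values sketched
 * above): for an AG of 2^20 blocks, agblocks / 100 reserves 10485
 * blocks, while a worst-case tree holding one record per AG block needs
 * roughly 12483 + 278 + 7 + 1 = 12769 blocks, so the max() picks the
 * per-record estimate here.
 */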