// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_ag.h"

static struct kmem_cache	*xfs_allocbt_cur_cache;

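/* Create a duplicate of this cursor against the same AG btree. */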
STATIC struct xfs_btree_cur *
xfs_allocbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_ag.agbp, cur->bc_ag.pag, cur->bc_btnum);
}

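/* Point the AGF at a new btree root block and adjust the recorded tree level. */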
STATIC void
xfs_allocbt_set_root(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*ptr,
	int				inc)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	int			btnum = cur->bc_btnum;

	ASSERT(ptr->s != 0);

	agf->agf_roots[btnum] = ptr->s;
	be32_add_cpu(&agf->agf_levels[btnum], inc);
	cur->bc_ag.pag->pagf_levels[btnum] += inc;
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}

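/* Allocate a new free space btree block from the AG freelist. */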
STATIC int
xfs_allocbt_alloc_block(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*start,
	union xfs_btree_ptr		*new,
	int				*stat)
{
	int			error;
	xfs_agblock_t		bno;

	/* Allocate the new block from the freelist. If we can't, give up. */
	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_ag.agbp, &bno, 1);
	if (error)
		return error;
	if (bno == NULLAGBLOCK) {
		*stat = 0;
		return 0;
	}

	atomic64_inc(&cur->bc_mp->m_allocbt_blks);
	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.agbp->b_pag, bno, 1, false);
	new->s = cpu_to_be32(bno);
	*stat = 1;
	return 0;
}

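/* Return a free space btree block to the AG freelist and mark it busy. */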
STATIC int
xfs_allocbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	xfs_agblock_t		bno;
	int			error;

	bno = xfs_daddr_to_agbno(cur->bc_mp, xfs_buf_daddr(bp));
	error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
	if (error)
		return error;

	atomic64_dec(&cur->bc_mp->m_allocbt_blks);
	xfs_extent_busy_insert(cur->bc_tp, agbp->b_pag, bno, 1,
			      XFS_EXTENT_BUSY_SKIP_DISCARD);
	return 0;
}

/*
 * Update the longest extent in the AGF
 */
STATIC void
xfs_allocbt_update_lastrec(
	struct xfs_btree_cur		*cur,
	const struct xfs_btree_block	*block,
	const union xfs_btree_rec	*rec,
	int				ptr,
	int				reason)
{
	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;
	struct xfs_perag	*pag;
	__be32			len;
	int			numrecs;

	ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);

	switch (reason) {
	case LASTREC_UPDATE:
		/*
		 * If this is the last leaf block and it's the last record,
		 * then update the size of the longest extent in the AG.
		 */
		if (ptr != xfs_btree_get_numrecs(block))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_INSREC:
		if (be32_to_cpu(rec->alloc.ar_blockcount) <=
		    be32_to_cpu(agf->agf_longest))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_DELREC:
		numrecs = xfs_btree_get_numrecs(block);
		if (ptr <= numrecs)
			return;
		ASSERT(ptr == numrecs + 1);
		if (numrecs) {
			xfs_alloc_rec_t *rrp;

			rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs);
			len = rrp->ar_blockcount;
		} else {
			len = 0;
		}
		break;
	default:
		ASSERT(0);
		return;
	}

	agf->agf_longest = len;
	pag = cur->bc_ag.agbp->b_pag;
	pag->pagf_longest = be32_to_cpu(len);
	xfs_alloc_log_agf(cur->bc_tp, cur->bc_ag.agbp, XFS_AGF_LONGEST);
}

STATIC int
xfs_allocbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mnr[level != 0];
}

STATIC int
xfs_allocbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mxr[level != 0];
}

STATIC void
xfs_allocbt_init_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	key->alloc.ar_startblock = rec->alloc.ar_startblock;
	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
}

STATIC void
xfs_bnobt_init_high_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	__u32				x;

	x = be32_to_cpu(rec->alloc.ar_startblock);
	x += be32_to_cpu(rec->alloc.ar_blockcount) - 1;
	key->alloc.ar_startblock = cpu_to_be32(x);
	key->alloc.ar_blockcount = 0;
}

STATIC void
xfs_cntbt_init_high_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
	key->alloc.ar_startblock = 0;
}

STATIC void
xfs_allocbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
	rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
}

STATIC void
xfs_allocbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;

	ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));

	ptr->s = agf->agf_roots[cur->bc_btnum];
}

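/* The bnobt orders free extents by starting block number. */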
STATIC int64_t
xfs_bnobt_key_diff(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key)
{
	struct xfs_alloc_rec_incore	*rec = &cur->bc_rec.a;
	const struct xfs_alloc_rec	*kp = &key->alloc;

	return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}

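/* The cntbt orders free extents by length, breaking ties by starting block. */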
STATIC int64_t
xfs_cntbt_key_diff(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key)
{
	struct xfs_alloc_rec_incore	*rec = &cur->bc_rec.a;
	const struct xfs_alloc_rec	*kp = &key->alloc;
	int64_t				diff;

	diff = (int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
	if (diff)
		return diff;

	return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}

STATIC int64_t
xfs_bnobt_diff_two_keys(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	return (int64_t)be32_to_cpu(k1->alloc.ar_startblock) -
			be32_to_cpu(k2->alloc.ar_startblock);
}

STATIC int64_t
xfs_cntbt_diff_two_keys(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	int64_t				diff;

	diff = be32_to_cpu(k1->alloc.ar_blockcount) -
	       be32_to_cpu(k2->alloc.ar_blockcount);
	if (diff)
		return diff;

	return be32_to_cpu(k1->alloc.ar_startblock) -
	       be32_to_cpu(k2->alloc.ar_startblock);
}

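/* Sanity check a free space btree block before it is used. */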
static xfs_failaddr_t
xfs_allocbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	xfs_failaddr_t		fa;
	unsigned int		level;
	xfs_btnum_t		btnum = XFS_BTNUM_BNOi;

	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	if (xfs_has_crc(mp)) {
		fa = xfs_btree_sblock_v5hdr_verify(bp);
		if (fa)
			return fa;
	}

	/*
	 * The perag may not be attached during grow operations or fully
	 * initialized from the AGF during log recovery. Therefore we can only
	 * check against maximum tree depth from those contexts.
	 *
	 * Otherwise check against the per-tree limit. Peek at one of the
	 * verifier magic values to determine the type of tree we're verifying
	 * against.
	 */
	level = be16_to_cpu(block->bb_level);
	if (bp->b_ops->magic[0] == cpu_to_be32(XFS_ABTC_MAGIC))
		btnum = XFS_BTNUM_CNTi;
	if (pag && pag->pagf_init) {
		if (level >= pag->pagf_levels[btnum])
			return __this_address;
	} else if (level >= mp->m_alloc_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp, mp->m_alloc_mxr[level != 0]);
}

static void
xfs_allocbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_allocbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

static void
xfs_allocbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_allocbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}

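/* Buffer ops for blocks of the by-block-number (bno) free space btree. */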
const struct xfs_buf_ops xfs_bnobt_buf_ops = {
	.name = "xfs_bnobt",
	.magic = { cpu_to_be32(XFS_ABTB_MAGIC),
		   cpu_to_be32(XFS_ABTB_CRC_MAGIC) },
	.verify_read = xfs_allocbt_read_verify,
	.verify_write = xfs_allocbt_write_verify,
	.verify_struct = xfs_allocbt_verify,
};

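/* Buffer ops for blocks of the by-size (cnt) free space btree. */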
const struct xfs_buf_ops xfs_cntbt_buf_ops = {
	.name = "xfs_cntbt",
	.magic = { cpu_to_be32(XFS_ABTC_MAGIC),
		   cpu_to_be32(XFS_ABTC_CRC_MAGIC) },
	.verify_read = xfs_allocbt_read_verify,
	.verify_write = xfs_allocbt_write_verify,
	.verify_struct = xfs_allocbt_verify,
};

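/* Ordering checks for keys and records, used by the btree consistency checks. */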
STATIC int
xfs_bnobt_keys_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->alloc.ar_startblock) <
	       be32_to_cpu(k2->alloc.ar_startblock);
}

STATIC int
xfs_bnobt_recs_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*r1,
	const union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->alloc.ar_startblock) +
		be32_to_cpu(r1->alloc.ar_blockcount) <=
		be32_to_cpu(r2->alloc.ar_startblock);
}

STATIC int
xfs_cntbt_keys_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->alloc.ar_blockcount) <
		be32_to_cpu(k2->alloc.ar_blockcount) ||
		(k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
		 be32_to_cpu(k1->alloc.ar_startblock) <
		 be32_to_cpu(k2->alloc.ar_startblock));
}

STATIC int
xfs_cntbt_recs_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*r1,
	const union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->alloc.ar_blockcount) <
		be32_to_cpu(r2->alloc.ar_blockcount) ||
		(r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
		 be32_to_cpu(r1->alloc.ar_startblock) <
		 be32_to_cpu(r2->alloc.ar_startblock));
}

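/* Operations vector for the by-block-number free space btree. */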
static const struct xfs_btree_ops xfs_bnobt_ops = {
	.rec_len = sizeof(xfs_alloc_rec_t),
	.key_len = sizeof(xfs_alloc_key_t),

	.dup_cursor = xfs_allocbt_dup_cursor,
	.set_root = xfs_allocbt_set_root,
	.alloc_block = xfs_allocbt_alloc_block,
	.free_block = xfs_allocbt_free_block,
	.update_lastrec = xfs_allocbt_update_lastrec,
	.get_minrecs = xfs_allocbt_get_minrecs,
	.get_maxrecs = xfs_allocbt_get_maxrecs,
	.init_key_from_rec = xfs_allocbt_init_key_from_rec,
	.init_high_key_from_rec = xfs_bnobt_init_high_key_from_rec,
	.init_rec_from_cur = xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur,
	.key_diff = xfs_bnobt_key_diff,
	.buf_ops = &xfs_bnobt_buf_ops,
	.diff_two_keys = xfs_bnobt_diff_two_keys,
	.keys_inorder = xfs_bnobt_keys_inorder,
	.recs_inorder = xfs_bnobt_recs_inorder,
};

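/* Operations vector for the by-size free space btree. */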
static const struct xfs_btree_ops xfs_cntbt_ops = {
	.rec_len = sizeof(xfs_alloc_rec_t),
	.key_len = sizeof(xfs_alloc_key_t),

	.dup_cursor = xfs_allocbt_dup_cursor,
	.set_root = xfs_allocbt_set_root,
	.alloc_block = xfs_allocbt_alloc_block,
	.free_block = xfs_allocbt_free_block,
	.update_lastrec = xfs_allocbt_update_lastrec,
	.get_minrecs = xfs_allocbt_get_minrecs,
	.get_maxrecs = xfs_allocbt_get_maxrecs,
	.init_key_from_rec = xfs_allocbt_init_key_from_rec,
	.init_high_key_from_rec = xfs_cntbt_init_high_key_from_rec,
	.init_rec_from_cur = xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur,
	.key_diff = xfs_cntbt_key_diff,
	.buf_ops = &xfs_cntbt_buf_ops,
	.diff_two_keys = xfs_cntbt_diff_two_keys,
	.keys_inorder = xfs_cntbt_keys_inorder,
	.recs_inorder = xfs_cntbt_recs_inorder,
};

/* Allocate most of a new allocation btree cursor. */
STATIC struct xfs_btree_cur *
xfs_allocbt_init_common(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum)
{
	struct xfs_btree_cur	*cur;

	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);

	cur = xfs_btree_alloc_cursor(mp, tp, btnum, mp->m_alloc_maxlevels,
			xfs_allocbt_cur_cache);
	cur->bc_ag.abt.active = false;

	if (btnum == XFS_BTNUM_CNT) {
		cur->bc_ops = &xfs_cntbt_ops;
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
	} else {
		cur->bc_ops = &xfs_bnobt_ops;
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
	}

	/* take a reference for the cursor */
	atomic_inc(&pag->pag_ref);
	cur->bc_ag.pag = pag;

	if (xfs_has_crc(mp))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	return cur;
}

/*
 * Allocate a new allocation btree cursor.
 */
struct xfs_btree_cur *			/* new alloc btree cursor */
xfs_allocbt_init_cursor(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_buf		*agbp,		/* buffer for agf structure */
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum)		/* btree identifier */
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_btree_cur	*cur;

	cur = xfs_allocbt_init_common(mp, tp, pag, btnum);
	if (btnum == XFS_BTNUM_CNT)
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
	else
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);

	cur->bc_ag.agbp = agbp;

	return cur;
}

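/*
 * Typical use by callers (a rough sketch; exact helpers vary by kernel
 * version): read the AGF, build a cursor, do the btree work, then tear the
 * cursor down.
 *
 *	error = xfs_alloc_read_agf(mp, tp, pag->pag_agno, 0, &agbp);
 *	if (!error) {
 *		cur = xfs_allocbt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_BNO);
 *		... lookups and updates via the generic xfs_btree_* helpers ...
 *		xfs_btree_del_cursor(cur, error);
 *	}
 */
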
/* Create a free space btree cursor with a fake root for staging. */
struct xfs_btree_cur *
xfs_allocbt_stage_cursor(
	struct xfs_mount	*mp,
	struct xbtree_afakeroot	*afake,
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum)
{
	struct xfs_btree_cur	*cur;

	cur = xfs_allocbt_init_common(mp, NULL, pag, btnum);
	xfs_btree_stage_afakeroot(cur, afake);
	return cur;
}

/*
 * Install a new free space btree root.  Caller is responsible for invalidating
 * and freeing the old btree blocks.
 */
void
xfs_allocbt_commit_staged_btree(
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	agf->agf_roots[cur->bc_btnum] = cpu_to_be32(afake->af_root);
	agf->agf_levels[cur->bc_btnum] = cpu_to_be32(afake->af_levels);
	xfs_alloc_log_agf(tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);

	if (cur->bc_btnum == XFS_BTNUM_BNO) {
		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_bnobt_ops);
	} else {
		cur->bc_flags |= XFS_BTREE_LASTREC_UPDATE;
		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_cntbt_ops);
	}
}

/* Calculate number of records in an alloc btree block. */
static inline unsigned int
xfs_allocbt_block_maxrecs(
	unsigned int		blocklen,
	bool			leaf)
{
	if (leaf)
		return blocklen / sizeof(xfs_alloc_rec_t);
	return blocklen / (sizeof(xfs_alloc_key_t) + sizeof(xfs_alloc_ptr_t));
}

/*
 * Calculate number of records in an alloc btree block.
 */
int
xfs_allocbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_ALLOC_BLOCK_LEN(mp);
	return xfs_allocbt_block_maxrecs(blocklen, leaf);
}

/* Free space btrees are at their largest when every other block is free. */
#define XFS_MAX_FREESP_RECORDS	((XFS_MAX_AG_BLOCKS + 1) / 2)

/* Compute the max possible height for free space btrees. */
unsigned int
xfs_allocbt_maxlevels_ondisk(void)
{
	unsigned int		minrecs[2];
	unsigned int		blocklen;

	blocklen = min(XFS_MIN_BLOCKSIZE - XFS_BTREE_SBLOCK_LEN,
		       XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN);

	minrecs[0] = xfs_allocbt_block_maxrecs(blocklen, true) / 2;
	minrecs[1] = xfs_allocbt_block_maxrecs(blocklen, false) / 2;

	return xfs_btree_compute_maxlevels(minrecs, XFS_MAX_FREESP_RECORDS);
}

/* Calculate the freespace btree size for some records. */
xfs_extlen_t
xfs_allocbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_alloc_mnr, len);
}

int __init
xfs_allocbt_init_cur_cache(void)
{
	xfs_allocbt_cur_cache = kmem_cache_create("xfs_bnobt_cur",
			xfs_btree_cur_sizeof(xfs_allocbt_maxlevels_ondisk()),
			0, 0, NULL);

	if (!xfs_allocbt_cur_cache)
		return -ENOMEM;
	return 0;
}

void
xfs_allocbt_destroy_cur_cache(void)
{
	kmem_cache_destroy(xfs_allocbt_cur_cache);
	xfs_allocbt_cur_cache = NULL;
}