2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 #include "xfs_shared.h"
21 #include "xfs_format.h"
22 #include "xfs_log_format.h"
23 #include "xfs_trans_resv.h"
26 #include "xfs_mount.h"
27 #include "xfs_defer.h"
28 #include "xfs_da_format.h"
29 #include "xfs_da_btree.h"
31 #include "xfs_inode.h"
32 #include "xfs_btree.h"
33 #include "xfs_trans.h"
34 #include "xfs_inode_item.h"
35 #include "xfs_extfree_item.h"
36 #include "xfs_alloc.h"
38 #include "xfs_bmap_util.h"
39 #include "xfs_bmap_btree.h"
40 #include "xfs_rtalloc.h"
41 #include "xfs_error.h"
42 #include "xfs_quota.h"
43 #include "xfs_trans_space.h"
44 #include "xfs_buf_item.h"
45 #include "xfs_trace.h"
46 #include "xfs_symlink.h"
47 #include "xfs_attr_leaf.h"
48 #include "xfs_filestream.h"
50 #include "xfs_ag_resv.h"
51 #include "xfs_refcount.h"
52 #include "xfs_rmap_btree.h"
53 #include "xfs_icache.h"
56 kmem_zone_t *xfs_bmap_free_item_zone;
59 * Miscellaneous helper functions
63 * Compute and fill in the value of the maximum depth of a bmap btree
64 * in this filesystem. Done once, during mount.
67 xfs_bmap_compute_maxlevels(
68 xfs_mount_t *mp, /* file system mount structure */
69 int whichfork) /* data or attr fork */
71 int level; /* btree level */
72 uint maxblocks; /* max blocks at this level */
73 uint maxleafents; /* max leaf entries possible */
74 int maxrootrecs; /* max records in root block */
75 int minleafrecs; /* min records in leaf block */
76 int minnoderecs; /* min records in node block */
77 int sz; /* root block size */
80 * The maximum number of extents in a file, hence the maximum
81 * number of leaf entries, is controlled by the type of di_nextents
82 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
83 * (a signed 16-bit number, xfs_aextnum_t).
85 * Note that we can no longer assume that if we are in ATTR1 that
86 * the fork offset of all the inodes will be
87 * (xfs_default_attroffset(ip) >> 3) because we could have mounted
88 * with ATTR2 and then mounted back with ATTR1, keeping the
89 * di_forkoff's fixed but probably at various positions. Therefore,
90 * for both ATTR1 and ATTR2 we have to assume the worst case scenario
91 * of a minimum size available.
93 if (whichfork == XFS_DATA_FORK) {
94 maxleafents = MAXEXTNUM;
95 sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
97 maxleafents = MAXAEXTNUM;
98 sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
100 maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
101 minleafrecs = mp->m_bmap_dmnr[0];
102 minnoderecs = mp->m_bmap_dmnr[1];
103 maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
104 for (level = 1; maxblocks > 1; level++) {
105 if (maxblocks <= maxrootrecs)
108 maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
110 mp->m_bm_maxlevels[whichfork] = level;
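/*
 * Illustrative sketch (not part of the original file): the same level
 * computation with the geometry passed in explicitly so the rounding is
 * easy to follow.  The parameter names are hypothetical stand-ins for
 * mp->m_bmap_dmnr[0], mp->m_bmap_dmnr[1] and xfs_bmdr_maxrecs().
 */
static inline int
example_bmbt_maxlevels(
	unsigned int		maxleafents,
	unsigned int		maxrootrecs,
	unsigned int		minleafrecs,
	unsigned int		minnoderecs)
{
	/* leaf blocks needed to hold maxleafents records */
	unsigned int		maxblocks = (maxleafents + minleafrecs - 1) /
					    minleafrecs;
	int			level;

	/* add a level until everything fits under a single root block */
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;	/* the root can reference them all */
		else
			maxblocks = (maxblocks + minnoderecs - 1) /
				    minnoderecs;
	}
	return level;
}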
113 STATIC int /* error */
115 struct xfs_btree_cur *cur,
119 int *stat) /* success/failure */
121 cur->bc_rec.b.br_startoff = off;
122 cur->bc_rec.b.br_startblock = bno;
123 cur->bc_rec.b.br_blockcount = len;
124 return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
127 STATIC int /* error */
129 struct xfs_btree_cur *cur,
133 int *stat) /* success/failure */
135 cur->bc_rec.b.br_startoff = off;
136 cur->bc_rec.b.br_startblock = bno;
137 cur->bc_rec.b.br_blockcount = len;
138 return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
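/*
 * Usage sketch (not part of the original file): how the update paths later
 * in this file drive these lookup helpers.  The record is looked up first
 * and the returned "stat" must be checked before the record is updated or
 * deleted.  The wrapper name is hypothetical.
 */
static inline int
example_bmbt_find_record(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len)
{
	int			stat;
	int			error;

	error = xfs_bmbt_lookup_eq(cur, off, bno, len, &stat);
	if (error)
		return error;
	if (stat != 1)		/* the record was not where we expected */
		return -EFSCORRUPTED;
	return 0;
}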
142 * Check if the inode needs to be converted to btree format.
144 static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
146 return whichfork != XFS_COW_FORK &&
147 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
148 XFS_IFORK_NEXTENTS(ip, whichfork) >
149 XFS_IFORK_MAXEXT(ip, whichfork);
153 * Check if the inode should be converted to extent format.
155 static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
157 return whichfork != XFS_COW_FORK &&
158 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
159 XFS_IFORK_NEXTENTS(ip, whichfork) <=
160 XFS_IFORK_MAXEXT(ip, whichfork);
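/*
 * Sketch (not part of the original file): the two predicates above form a
 * pair; the grow side feeds xfs_bmap_extents_to_btree() and the shrink side
 * feeds xfs_bmap_btree_to_extents() later in this file.  A fork whose
 * format already matches its extent count satisfies neither.  The helper
 * name is hypothetical.
 */
static inline bool
example_fork_format_matches_count(struct xfs_inode *ip, int whichfork)
{
	return !xfs_bmap_needs_btree(ip, whichfork) &&
	       !xfs_bmap_wants_extents(ip, whichfork);
}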
164 * Update the record referred to by cur to the value given
165 * by [off, bno, len, state].
166 * This either works (return 0) or gets an EFSCORRUPTED error.
170 struct xfs_btree_cur *cur,
176 union xfs_btree_rec rec;
178 xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state);
179 return xfs_btree_update(cur, &rec);
183 * Compute the worst-case number of indirect blocks that will be used
184 * for ip's delayed extent of length "len".
187 xfs_bmap_worst_indlen(
188 xfs_inode_t *ip, /* incore inode pointer */
189 xfs_filblks_t len) /* delayed extent length */
191 int level; /* btree level number */
192 int maxrecs; /* maximum record count at this level */
193 xfs_mount_t *mp; /* mount structure */
194 xfs_filblks_t rval; /* return value */
195 xfs_filblks_t orig_len;
199 /* Calculate the worst-case size of the bmbt. */
201 maxrecs = mp->m_bmap_dmxr[0];
202 for (level = 0, rval = 0;
203 level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
206 do_div(len, maxrecs);
209 rval += XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
214 maxrecs = mp->m_bmap_dmxr[1];
217 /* Calculate the worst-case size of the rmapbt. */
218 if (xfs_sb_version_hasrmapbt(&mp->m_sb))
219 rval += 1 + xfs_rmapbt_calc_size(mp, orig_len) +
220 mp->m_rmap_maxlevels;
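/*
 * Sketch (not part of the original file): the bmbt half of the estimate
 * above with the fanout passed in explicitly.  Each level needs
 * ceil(len / fanout) blocks; once a level collapses to a single block the
 * remaining levels cost one block each.  The parameter names are
 * hypothetical stand-ins for mp->m_bmap_dmxr[] and XFS_BM_MAXLEVELS().
 */
static inline unsigned long long
example_bmbt_worst_indlen(
	unsigned long long	len,		/* delayed extent length */
	unsigned int		maxlevels,
	unsigned int		leafrecs,
	unsigned int		noderecs)
{
	unsigned long long	rval = 0;
	unsigned int		maxrecs = leafrecs;
	unsigned int		level;

	for (level = 0; level < maxlevels; level++) {
		len = (len + maxrecs - 1) / maxrecs;	/* blocks this level */
		rval += len;
		if (len == 1) {
			/* everything above this level is one block each */
			rval += maxlevels - level - 1;
			break;
		}
		if (level == 0)
			maxrecs = noderecs;
	}
	return rval;
}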
226 * Calculate the default attribute fork offset for newly created inodes.
229 xfs_default_attroffset(
230 struct xfs_inode *ip)
232 struct xfs_mount *mp = ip->i_mount;
235 if (mp->m_sb.sb_inodesize == 256) {
236 offset = XFS_LITINO(mp, ip->i_d.di_version) -
237 XFS_BMDR_SPACE_CALC(MINABTPTRS);
239 offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
242 ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
247 * Helper routine to reset inode di_forkoff field when switching
248 * attribute fork from local to extent format - we reset it where
249 * possible to make space available for inline data fork extents.
252 xfs_bmap_forkoff_reset(
256 if (whichfork == XFS_ATTR_FORK &&
257 ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
258 ip->i_d.di_format != XFS_DINODE_FMT_UUID &&
259 ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
260 uint dfl_forkoff = xfs_default_attroffset(ip) >> 3;
262 if (dfl_forkoff > ip->i_d.di_forkoff)
263 ip->i_d.di_forkoff = dfl_forkoff;
268 STATIC struct xfs_buf *
270 struct xfs_btree_cur *cur,
273 struct xfs_log_item_desc *lidp;
279 for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
280 if (!cur->bc_bufs[i])
282 if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
283 return cur->bc_bufs[i];
286 /* Chase down all the log items to see if the bp is there */
287 list_for_each_entry(lidp, &cur->bc_tp->t_items, lid_trans) {
288 struct xfs_buf_log_item *bip;
289 bip = (struct xfs_buf_log_item *)lidp->lid_item;
290 if (bip->bli_item.li_type == XFS_LI_BUF &&
291 XFS_BUF_ADDR(bip->bli_buf) == bno)
300 struct xfs_btree_block *block,
306 __be64 *pp, *thispa; /* pointer to block address */
307 xfs_bmbt_key_t *prevp, *keyp;
309 ASSERT(be16_to_cpu(block->bb_level) > 0);
312 for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
313 dmxr = mp->m_bmap_dmxr[0];
314 keyp = XFS_BMBT_KEY_ADDR(mp, block, i);
317 ASSERT(be64_to_cpu(prevp->br_startoff) <
318 be64_to_cpu(keyp->br_startoff));
323 * Compare the block numbers to see if there are dups.
326 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
328 pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);
330 for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
332 thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
334 thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
335 if (*thispa == *pp) {
336 xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
338 (unsigned long long)be64_to_cpu(*thispa));
339 panic("%s: ptrs are equal in node\n",
347 * Check that the extents for the inode ip are in the right order in all
348 * btree leaves. This becomes prohibitively expensive for large extent count
349 * files, so don't bother with inodes that have more than 10,000 extents in
350 * them. The btree record ordering checks will still be done, so for such large
351 * bmapbt constructs that is going to catch most corruptions.
354 xfs_bmap_check_leaf_extents(
355 xfs_btree_cur_t *cur, /* btree cursor or null */
356 xfs_inode_t *ip, /* incore inode pointer */
357 int whichfork) /* data or attr fork */
359 struct xfs_btree_block *block; /* current btree block */
360 xfs_fsblock_t bno; /* block # of "block" */
361 xfs_buf_t *bp; /* buffer for "block" */
362 int error; /* error return value */
363 xfs_extnum_t i=0, j; /* index into the extents list */
364 xfs_ifork_t *ifp; /* fork structure */
365 int level; /* btree level, for checking */
366 xfs_mount_t *mp; /* file system mount structure */
367 __be64 *pp; /* pointer to block address */
368 xfs_bmbt_rec_t *ep; /* pointer to current extent */
369 xfs_bmbt_rec_t last = {0, 0}; /* last extent in prev block */
370 xfs_bmbt_rec_t *nextp; /* pointer to next extent */
373 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
377 /* skip large extent count inodes */
378 if (ip->i_d.di_nextents > 10000)
383 ifp = XFS_IFORK_PTR(ip, whichfork);
384 block = ifp->if_broot;
386 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
388 level = be16_to_cpu(block->bb_level);
390 xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
391 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
392 bno = be64_to_cpu(*pp);
394 ASSERT(bno != NULLFSBLOCK);
395 ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
396 ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
399 * Go down the tree until leaf level is reached, following the first
400 * pointer (leftmost) at each level.
402 while (level-- > 0) {
403 /* See if buf is in cur first */
405 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
408 error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
414 block = XFS_BUF_TO_BLOCK(bp);
419 * Check this block for basic sanity (increasing keys and
420 * no duplicate blocks).
423 xfs_check_block(block, mp, 0, 0);
424 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
425 bno = be64_to_cpu(*pp);
426 XFS_WANT_CORRUPTED_GOTO(mp,
427 XFS_FSB_SANITY_CHECK(mp, bno), error0);
430 xfs_trans_brelse(NULL, bp);
435 * Here with bp and block set to the leftmost leaf node in the tree.
440 * Loop over all leaf nodes checking that all extents are in the right order.
443 xfs_fsblock_t nextbno;
444 xfs_extnum_t num_recs;
447 num_recs = xfs_btree_get_numrecs(block);
450 * Read-ahead the next leaf block, if any.
453 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
456 * Check all the extents to make sure they are OK.
457 * If we had a previous block, the last entry should
458 * conform with the first entry in this one.
461 ep = XFS_BMBT_REC_ADDR(mp, block, 1);
463 ASSERT(xfs_bmbt_disk_get_startoff(&last) +
464 xfs_bmbt_disk_get_blockcount(&last) <=
465 xfs_bmbt_disk_get_startoff(ep));
467 for (j = 1; j < num_recs; j++) {
468 nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
469 ASSERT(xfs_bmbt_disk_get_startoff(ep) +
470 xfs_bmbt_disk_get_blockcount(ep) <=
471 xfs_bmbt_disk_get_startoff(nextp));
479 xfs_trans_brelse(NULL, bp);
483 * If we've reached the end, stop.
485 if (bno == NULLFSBLOCK)
489 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
492 error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
498 block = XFS_BUF_TO_BLOCK(bp);
504 xfs_warn(mp, "%s: at error0", __func__);
506 xfs_trans_brelse(NULL, bp);
508 xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
510 panic("%s: CORRUPTED BTREE OR SOMETHING", __func__);
515 * Add bmap trace insert entries for all the contents of the extent records.
518 xfs_bmap_trace_exlist(
519 xfs_inode_t *ip, /* incore inode pointer */
520 xfs_extnum_t cnt, /* count of entries in the list */
521 int whichfork, /* data or attr or cow fork */
522 unsigned long caller_ip)
524 xfs_extnum_t idx; /* extent record index */
525 xfs_ifork_t *ifp; /* inode fork pointer */
528 if (whichfork == XFS_ATTR_FORK)
529 state |= BMAP_ATTRFORK;
530 else if (whichfork == XFS_COW_FORK)
531 state |= BMAP_COWFORK;
533 ifp = XFS_IFORK_PTR(ip, whichfork);
534 ASSERT(cnt == xfs_iext_count(ifp));
535 for (idx = 0; idx < cnt; idx++)
536 trace_xfs_extlist(ip, idx, state, caller_ip);
540 * Validate that the bmbt_irecs being returned from bmapi are valid
541 * given the caller's original parameters. Specifically check the
542 * ranges of the returned irecs to ensure that they only extend beyond
543 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
546 xfs_bmap_validate_ret(
550 xfs_bmbt_irec_t *mval,
554 int i; /* index to map values */
556 ASSERT(ret_nmap <= nmap);
558 for (i = 0; i < ret_nmap; i++) {
559 ASSERT(mval[i].br_blockcount > 0);
560 if (!(flags & XFS_BMAPI_ENTIRE)) {
561 ASSERT(mval[i].br_startoff >= bno);
562 ASSERT(mval[i].br_blockcount <= len);
563 ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
566 ASSERT(mval[i].br_startoff < bno + len);
567 ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
571 mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
572 mval[i].br_startoff);
573 ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
574 mval[i].br_startblock != HOLESTARTBLOCK);
575 ASSERT(mval[i].br_state == XFS_EXT_NORM ||
576 mval[i].br_state == XFS_EXT_UNWRITTEN);
581 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
582 #define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
586 * bmap free list manipulation functions
590 * Add the extent to the list of extents to be freed at transaction end.
591 * The list is maintained sorted (by block number).
595 struct xfs_mount *mp,
596 struct xfs_defer_ops *dfops,
599 struct xfs_owner_info *oinfo)
601 struct xfs_extent_free_item *new; /* new element */
606 ASSERT(bno != NULLFSBLOCK);
608 ASSERT(len <= MAXEXTLEN);
609 ASSERT(!isnullstartblock(bno));
610 agno = XFS_FSB_TO_AGNO(mp, bno);
611 agbno = XFS_FSB_TO_AGBNO(mp, bno);
612 ASSERT(agno < mp->m_sb.sb_agcount);
613 ASSERT(agbno < mp->m_sb.sb_agblocks);
614 ASSERT(len < mp->m_sb.sb_agblocks);
615 ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
617 ASSERT(xfs_bmap_free_item_zone != NULL);
619 new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
620 new->xefi_startblock = bno;
621 new->xefi_blockcount = (xfs_extlen_t)len;
623 new->xefi_oinfo = *oinfo;
625 xfs_rmap_skip_owner_update(&new->xefi_oinfo);
626 trace_xfs_bmap_free_defer(mp, XFS_FSB_TO_AGNO(mp, bno), 0,
627 XFS_FSB_TO_AGBNO(mp, bno), len);
628 xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
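/*
 * Usage sketch (not part of the original file): deferring the free of a
 * single block owned by an inode's bmap btree, as done when a one-level
 * btree is collapsed back to extents format below.  The wrapper name is
 * hypothetical.
 */
static inline void
example_defer_free_bmbt_block(
	struct xfs_mount	*mp,
	struct xfs_defer_ops	*dfops,
	struct xfs_inode	*ip,
	xfs_fsblock_t		bno,
	int			whichfork)
{
	struct xfs_owner_info	oinfo;

	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	xfs_bmap_add_free(mp, dfops, bno, 1, &oinfo);
}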
632 * Inode fork format manipulation functions
636 * Transform a btree format file with only one leaf node, where the
637 * extents list will fit in the inode, into an extents format file.
638 * Since the file extents are already in-core, all we have to do is
639 * give up the space for the btree root and pitch the leaf block.
641 STATIC int /* error */
642 xfs_bmap_btree_to_extents(
643 xfs_trans_t *tp, /* transaction pointer */
644 xfs_inode_t *ip, /* incore inode pointer */
645 xfs_btree_cur_t *cur, /* btree cursor */
646 int *logflagsp, /* inode logging flags */
647 int whichfork) /* data or attr fork */
650 struct xfs_btree_block *cblock;/* child btree block */
651 xfs_fsblock_t cbno; /* child block number */
652 xfs_buf_t *cbp; /* child block's buffer */
653 int error; /* error return value */
654 xfs_ifork_t *ifp; /* inode fork data */
655 xfs_mount_t *mp; /* mount point structure */
656 __be64 *pp; /* ptr to block address */
657 struct xfs_btree_block *rblock;/* root btree block */
658 struct xfs_owner_info oinfo;
661 ifp = XFS_IFORK_PTR(ip, whichfork);
662 ASSERT(whichfork != XFS_COW_FORK);
663 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
664 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
665 rblock = ifp->if_broot;
666 ASSERT(be16_to_cpu(rblock->bb_level) == 1);
667 ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
668 ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
669 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
670 cbno = be64_to_cpu(*pp);
673 if ((error = xfs_btree_check_lptr(cur, cbno, 1)))
676 error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF,
680 cblock = XFS_BUF_TO_BLOCK(cbp);
681 if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
683 xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
684 xfs_bmap_add_free(mp, cur->bc_private.b.dfops, cbno, 1, &oinfo);
685 ip->i_d.di_nblocks--;
686 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
687 xfs_trans_binval(tp, cbp);
688 if (cur->bc_bufs[0] == cbp)
689 cur->bc_bufs[0] = NULL;
690 xfs_iroot_realloc(ip, -1, whichfork);
691 ASSERT(ifp->if_broot == NULL);
692 ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
693 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
694 *logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
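/*
 * Usage sketch (not part of the original file): callers only collapse the
 * tree after checking the record count, pairing xfs_bmap_wants_extents()
 * with the conversion above.  The wrapper name is hypothetical.
 */
static inline int
example_maybe_shrink_to_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_btree_cur	*cur,
	int			*logflagsp,
	int			whichfork)
{
	*logflagsp = 0;
	if (!xfs_bmap_wants_extents(ip, whichfork))
		return 0;
	return xfs_bmap_btree_to_extents(tp, ip, cur, logflagsp, whichfork);
}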
699 * Convert an extents-format file into a btree-format file.
700 * The new file will have a root block (in the inode) and a single child block.
702 STATIC int /* error */
703 xfs_bmap_extents_to_btree(
704 xfs_trans_t *tp, /* transaction pointer */
705 xfs_inode_t *ip, /* incore inode pointer */
706 xfs_fsblock_t *firstblock, /* first-block-allocated */
707 struct xfs_defer_ops *dfops, /* blocks freed in xaction */
708 xfs_btree_cur_t **curp, /* cursor returned to caller */
709 int wasdel, /* converting a delayed alloc */
710 int *logflagsp, /* inode logging flags */
711 int whichfork) /* data or attr fork */
713 struct xfs_btree_block *ablock; /* allocated (child) bt block */
714 xfs_buf_t *abp; /* buffer for ablock */
715 xfs_alloc_arg_t args; /* allocation arguments */
716 xfs_bmbt_rec_t *arp; /* child record pointer */
717 struct xfs_btree_block *block; /* btree root block */
718 xfs_btree_cur_t *cur; /* bmap btree cursor */
719 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
720 int error; /* error return value */
721 xfs_extnum_t i, cnt; /* extent record index */
722 xfs_ifork_t *ifp; /* inode fork pointer */
723 xfs_bmbt_key_t *kp; /* root block key pointer */
724 xfs_mount_t *mp; /* mount structure */
725 xfs_extnum_t nextents; /* number of file extents */
726 xfs_bmbt_ptr_t *pp; /* root block address pointer */
729 ASSERT(whichfork != XFS_COW_FORK);
730 ifp = XFS_IFORK_PTR(ip, whichfork);
731 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
734 * Make space in the inode incore.
736 xfs_iroot_realloc(ip, 1, whichfork);
737 ifp->if_flags |= XFS_IFBROOT;
742 block = ifp->if_broot;
743 if (xfs_sb_version_hascrc(&mp->m_sb))
744 xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
745 XFS_BMAP_CRC_MAGIC, 1, 1, ip->i_ino,
746 XFS_BTREE_LONG_PTRS | XFS_BTREE_CRC_BLOCKS);
748 xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
749 XFS_BMAP_MAGIC, 1, 1, ip->i_ino,
750 XFS_BTREE_LONG_PTRS);
753 * Need a cursor. Can't allocate until bb_level is filled in.
755 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
756 cur->bc_private.b.firstblock = *firstblock;
757 cur->bc_private.b.dfops = dfops;
758 cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
760 * Convert to a btree with two levels, one record in root.
762 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
763 memset(&args, 0, sizeof(args));
766 xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
767 args.firstblock = *firstblock;
768 if (*firstblock == NULLFSBLOCK) {
769 args.type = XFS_ALLOCTYPE_START_BNO;
770 args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
771 } else if (dfops->dop_low) {
772 args.type = XFS_ALLOCTYPE_START_BNO;
774 args.fsbno = *firstblock;
776 args.type = XFS_ALLOCTYPE_NEAR_BNO;
777 args.fsbno = *firstblock;
779 args.minlen = args.maxlen = args.prod = 1;
780 args.wasdel = wasdel;
782 if ((error = xfs_alloc_vextent(&args))) {
783 xfs_iroot_realloc(ip, -1, whichfork);
784 ASSERT(ifp->if_broot == NULL);
785 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
786 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
791 * During a CoW operation, the allocation and bmbt updates occur in
792 * different transactions. The mapping code tries to put new bmbt
793 * blocks near extents being mapped, but the only way to guarantee this
794 * is if the alloc and the mapping happen in a single transaction that
795 * has a block reservation. That isn't the case here, so if we run out
796 * of space we'll try again with another AG.
798 if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) &&
799 args.fsbno == NULLFSBLOCK &&
800 args.type == XFS_ALLOCTYPE_NEAR_BNO) {
801 args.type = XFS_ALLOCTYPE_FIRST_AG;
804 if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
805 xfs_iroot_realloc(ip, -1, whichfork);
806 ASSERT(ifp->if_broot == NULL);
807 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
808 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
812 * Allocation can't fail, the space was reserved.
814 ASSERT(*firstblock == NULLFSBLOCK ||
815 args.agno >= XFS_FSB_TO_AGNO(mp, *firstblock));
816 *firstblock = cur->bc_private.b.firstblock = args.fsbno;
817 cur->bc_private.b.allocated++;
818 ip->i_d.di_nblocks++;
819 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
820 abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
822 * Fill in the child block.
824 abp->b_ops = &xfs_bmbt_buf_ops;
825 ablock = XFS_BUF_TO_BLOCK(abp);
826 if (xfs_sb_version_hascrc(&mp->m_sb))
827 xfs_btree_init_block_int(mp, ablock, abp->b_bn,
828 XFS_BMAP_CRC_MAGIC, 0, 0, ip->i_ino,
829 XFS_BTREE_LONG_PTRS | XFS_BTREE_CRC_BLOCKS);
831 xfs_btree_init_block_int(mp, ablock, abp->b_bn,
832 XFS_BMAP_MAGIC, 0, 0, ip->i_ino,
833 XFS_BTREE_LONG_PTRS);
835 arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
836 nextents = xfs_iext_count(ifp);
837 for (cnt = i = 0; i < nextents; i++) {
838 ep = xfs_iext_get_ext(ifp, i);
839 if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) {
840 arp->l0 = cpu_to_be64(ep->l0);
841 arp->l1 = cpu_to_be64(ep->l1);
845 ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
846 xfs_btree_set_numrecs(ablock, cnt);
849 * Fill in the root key and pointer.
851 kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
852 arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
853 kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
854 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
855 be16_to_cpu(block->bb_level)));
856 *pp = cpu_to_be64(args.fsbno);
859 * Do all this logging at the end so that
860 * the root is at the right level.
862 xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
863 xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
864 ASSERT(*curp == NULL);
866 *logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
871 * Convert a local file to an extents file.
872 * This code is out of bounds for data forks of regular files,
873 * since the file data needs to get logged so things will stay consistent.
874 * (The bmap-level manipulations are ok, though).
877 xfs_bmap_local_to_extents_empty(
878 struct xfs_inode *ip,
881 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
883 ASSERT(whichfork != XFS_COW_FORK);
884 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
885 ASSERT(ifp->if_bytes == 0);
886 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
888 xfs_bmap_forkoff_reset(ip, whichfork);
889 ifp->if_flags &= ~XFS_IFINLINE;
890 ifp->if_flags |= XFS_IFEXTENTS;
891 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
895 STATIC int /* error */
896 xfs_bmap_local_to_extents(
897 xfs_trans_t *tp, /* transaction pointer */
898 xfs_inode_t *ip, /* incore inode pointer */
899 xfs_fsblock_t *firstblock, /* first block allocated in xaction */
900 xfs_extlen_t total, /* total blocks needed by transaction */
901 int *logflagsp, /* inode logging flags */
903 void (*init_fn)(struct xfs_trans *tp,
905 struct xfs_inode *ip,
906 struct xfs_ifork *ifp))
909 int flags; /* logging flags returned */
910 xfs_ifork_t *ifp; /* inode fork pointer */
911 xfs_alloc_arg_t args; /* allocation arguments */
912 xfs_buf_t *bp; /* buffer for extent block */
913 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
916 * We don't want to deal with the case of keeping inode data inline yet.
917 * So passing in the data fork of a regular inode is invalid.
919 ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
920 ifp = XFS_IFORK_PTR(ip, whichfork);
921 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
923 if (!ifp->if_bytes) {
924 xfs_bmap_local_to_extents_empty(ip, whichfork);
925 flags = XFS_ILOG_CORE;
931 ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) ==
933 memset(&args, 0, sizeof(args));
935 args.mp = ip->i_mount;
936 xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
937 args.firstblock = *firstblock;
939 * Allocate a block. We know we need only one, since the
940 * file currently fits in an inode.
942 if (*firstblock == NULLFSBLOCK) {
944 args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
945 args.type = XFS_ALLOCTYPE_START_BNO;
947 args.fsbno = *firstblock;
948 args.type = XFS_ALLOCTYPE_NEAR_BNO;
951 args.minlen = args.maxlen = args.prod = 1;
952 error = xfs_alloc_vextent(&args);
957 * During a CoW operation, the allocation and bmbt updates occur in
958 * different transactions. The mapping code tries to put new bmbt
959 * blocks near extents being mapped, but the only way to guarantee this
960 * is if the alloc and the mapping happen in a single transaction that
961 * has a block reservation. That isn't the case here, so if we run out
962 * of space we'll try again with another AG.
964 if (xfs_sb_version_hasreflink(&ip->i_mount->m_sb) &&
965 args.fsbno == NULLFSBLOCK &&
966 args.type == XFS_ALLOCTYPE_NEAR_BNO) {
969 /* Can't fail, the space was reserved. */
970 ASSERT(args.fsbno != NULLFSBLOCK);
971 ASSERT(args.len == 1);
972 *firstblock = args.fsbno;
973 bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
976 * Initialize the block, copy the data and log the remote buffer.
978 * The callout is responsible for logging because the remote format
979 * might differ from the local format and thus we don't know how much to
980 * log here. Note that init_fn must also set the buffer log item type correctly.
983 init_fn(tp, bp, ip, ifp);
985 /* account for the change in fork size */
986 xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
987 xfs_bmap_local_to_extents_empty(ip, whichfork);
988 flags |= XFS_ILOG_CORE;
990 xfs_iext_add(ifp, 0, 1);
991 ep = xfs_iext_get_ext(ifp, 0);
992 xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
993 trace_xfs_bmap_post_update(ip, 0,
994 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0,
996 XFS_IFORK_NEXT_SET(ip, whichfork, 1);
997 ip->i_d.di_nblocks = 1;
998 xfs_trans_mod_dquot_byino(tp, ip,
999 XFS_TRANS_DQ_BCOUNT, 1L);
1000 flags |= xfs_ilog_fext(whichfork);
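/*
 * Sketch of an init_fn callout (not part of the original file): a
 * simplified, CRC-less rendition of what xfs_symlink_local_to_remote()
 * does - copy the inline data into the new block, set the buffer type so
 * log recovery knows what it holds, and log exactly the bytes written.
 * Treat the buffer type and the raw copy as illustrative assumptions.
 */
static inline void
example_local_to_remote_init(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	struct xfs_inode	*ip,
	struct xfs_ifork	*ifp)
{
	/* ip is unused in this simplified, non-CRC variant */
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SYMLINK_BUF);
	memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
	xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
}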
1008 * Called from xfs_bmap_add_attrfork to handle btree format files.
1010 STATIC int /* error */
1011 xfs_bmap_add_attrfork_btree(
1012 xfs_trans_t *tp, /* transaction pointer */
1013 xfs_inode_t *ip, /* incore inode pointer */
1014 xfs_fsblock_t *firstblock, /* first block allocated */
1015 struct xfs_defer_ops *dfops, /* blocks to free at commit */
1016 int *flags) /* inode logging flags */
1018 xfs_btree_cur_t *cur; /* btree cursor */
1019 int error; /* error return value */
1020 xfs_mount_t *mp; /* file system mount struct */
1021 int stat; /* newroot status */
1024 if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
1025 *flags |= XFS_ILOG_DBROOT;
1027 cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
1028 cur->bc_private.b.dfops = dfops;
1029 cur->bc_private.b.firstblock = *firstblock;
1030 if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
1032 /* must be at least one entry */
1033 XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
1034 if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
1037 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1040 *firstblock = cur->bc_private.b.firstblock;
1041 cur->bc_private.b.allocated = 0;
1042 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1046 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
1051 * Called from xfs_bmap_add_attrfork to handle extents format files.
1053 STATIC int /* error */
1054 xfs_bmap_add_attrfork_extents(
1055 xfs_trans_t *tp, /* transaction pointer */
1056 xfs_inode_t *ip, /* incore inode pointer */
1057 xfs_fsblock_t *firstblock, /* first block allocated */
1058 struct xfs_defer_ops *dfops, /* blocks to free at commit */
1059 int *flags) /* inode logging flags */
1061 xfs_btree_cur_t *cur; /* bmap btree cursor */
1062 int error; /* error return value */
1064 if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
1067 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops, &cur, 0,
1068 flags, XFS_DATA_FORK);
1070 cur->bc_private.b.allocated = 0;
1071 xfs_btree_del_cursor(cur,
1072 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
1078 * Called from xfs_bmap_add_attrfork to handle local format files. Each
1079 * different data fork content type needs a different callout to do the
1080 * conversion. Some are basic and only require special block initialisation
1081 * callouts for the data formatting; others (directories) are so specialised they
1082 * handle everything themselves.
1084 * XXX (dgc): investigate whether directory conversion can use the generic
1085 * formatting callout. It should be possible - it's just a very complex
1088 STATIC int /* error */
1089 xfs_bmap_add_attrfork_local(
1090 xfs_trans_t *tp, /* transaction pointer */
1091 xfs_inode_t *ip, /* incore inode pointer */
1092 xfs_fsblock_t *firstblock, /* first block allocated */
1093 struct xfs_defer_ops *dfops, /* blocks to free at commit */
1094 int *flags) /* inode logging flags */
1096 xfs_da_args_t dargs; /* args for dir/attr code */
1098 if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
1101 if (S_ISDIR(VFS_I(ip)->i_mode)) {
1102 memset(&dargs, 0, sizeof(dargs));
1103 dargs.geo = ip->i_mount->m_dir_geo;
1105 dargs.firstblock = firstblock;
1106 dargs.dfops = dfops;
1107 dargs.total = dargs.geo->fsbcount;
1108 dargs.whichfork = XFS_DATA_FORK;
1110 return xfs_dir2_sf_to_block(&dargs);
1113 if (S_ISLNK(VFS_I(ip)->i_mode))
1114 return xfs_bmap_local_to_extents(tp, ip, firstblock, 1,
1115 flags, XFS_DATA_FORK,
1116 xfs_symlink_local_to_remote);
1118 /* should only be called for types that support local format data */
1120 return -EFSCORRUPTED;
1124 * Convert inode from non-attributed to attributed.
1125 * Must not be in a transaction, ip must not be locked.
1127 int /* error code */
1128 xfs_bmap_add_attrfork(
1129 xfs_inode_t *ip, /* incore inode pointer */
1130 int size, /* space new attribute needs */
1131 int rsvd) /* xact may use reserved blks */
1133 xfs_fsblock_t firstblock; /* 1st block/ag allocated */
1134 struct xfs_defer_ops dfops; /* freed extent records */
1135 xfs_mount_t *mp; /* mount structure */
1136 xfs_trans_t *tp; /* transaction pointer */
1137 int blks; /* space reservation */
1138 int version = 1; /* superblock attr version */
1139 int logflags; /* logging flags */
1140 int error; /* error return value */
1142 ASSERT(XFS_IFORK_Q(ip) == 0);
1145 ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1147 blks = XFS_ADDAFORK_SPACE_RES(mp);
1149 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
1150 rsvd ? XFS_TRANS_RESERVE : 0, &tp);
1154 xfs_ilock(ip, XFS_ILOCK_EXCL);
1155 error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
1156 XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
1157 XFS_QMOPT_RES_REGBLKS);
1160 if (XFS_IFORK_Q(ip))
1162 if (ip->i_d.di_anextents != 0) {
1163 error = -EFSCORRUPTED;
1166 if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
1168 * For inodes coming from pre-6.2 filesystems.
1170 ASSERT(ip->i_d.di_aformat == 0);
1171 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
1174 xfs_trans_ijoin(tp, ip, 0);
1175 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1177 switch (ip->i_d.di_format) {
1178 case XFS_DINODE_FMT_DEV:
1179 ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
1181 case XFS_DINODE_FMT_UUID:
1182 ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3;
1184 case XFS_DINODE_FMT_LOCAL:
1185 case XFS_DINODE_FMT_EXTENTS:
1186 case XFS_DINODE_FMT_BTREE:
1187 ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
1188 if (!ip->i_d.di_forkoff)
1189 ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
1190 else if (mp->m_flags & XFS_MOUNT_ATTR2)
1199 ASSERT(ip->i_afp == NULL);
1200 ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
1201 ip->i_afp->if_flags = XFS_IFEXTENTS;
1203 xfs_defer_init(&dfops, &firstblock);
1204 switch (ip->i_d.di_format) {
1205 case XFS_DINODE_FMT_LOCAL:
1206 error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &dfops,
1209 case XFS_DINODE_FMT_EXTENTS:
1210 error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
1213 case XFS_DINODE_FMT_BTREE:
1214 error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &dfops,
1222 xfs_trans_log_inode(tp, ip, logflags);
1225 if (!xfs_sb_version_hasattr(&mp->m_sb) ||
1226 (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
1227 bool log_sb = false;
1229 spin_lock(&mp->m_sb_lock);
1230 if (!xfs_sb_version_hasattr(&mp->m_sb)) {
1231 xfs_sb_version_addattr(&mp->m_sb);
1234 if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
1235 xfs_sb_version_addattr2(&mp->m_sb);
1238 spin_unlock(&mp->m_sb_lock);
1243 error = xfs_defer_finish(&tp, &dfops, NULL);
1246 error = xfs_trans_commit(tp);
1247 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1251 xfs_defer_cancel(&dfops);
1253 xfs_trans_cancel(tp);
1254 xfs_iunlock(ip, XFS_ILOCK_EXCL);
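/*
 * Usage sketch (not part of the original file): callers such as the
 * attribute set path add the fork lazily, before the first attribute is
 * written to an inode that does not have one yet.  The wrapper name and
 * the "size" hint are hypothetical.
 */
static inline int
example_ensure_attr_fork(
	struct xfs_inode	*ip,
	int			size,	/* space the new attribute needs */
	int			rsvd)	/* may dip into reserved blocks */
{
	if (XFS_IFORK_Q(ip))
		return 0;		/* attr fork already present */
	return xfs_bmap_add_attrfork(ip, size, rsvd);
}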
1259 * Internal and external extent tree search functions.
1263 * Read in the extents to if_extents.
1264 * All inode fields are set up by the caller; we just traverse the btree
1265 * and copy the records in. If the file system cannot contain unwritten
1266 * extents, the records are checked for no "state" flags.
1269 xfs_bmap_read_extents(
1270 xfs_trans_t *tp, /* transaction pointer */
1271 xfs_inode_t *ip, /* incore inode */
1272 int whichfork) /* data or attr fork */
1274 struct xfs_btree_block *block; /* current btree block */
1275 xfs_fsblock_t bno; /* block # of "block" */
1276 xfs_buf_t *bp; /* buffer for "block" */
1277 int error; /* error return value */
1278 xfs_exntfmt_t exntf; /* XFS_EXTFMT_NOSTATE, if checking */
1279 xfs_extnum_t i, j; /* index into the extents list */
1280 xfs_ifork_t *ifp; /* fork structure */
1281 int level; /* btree level, for checking */
1282 xfs_mount_t *mp; /* file system mount structure */
1283 __be64 *pp; /* pointer to block address */
1285 xfs_extnum_t room; /* number of entries there's room for */
1288 ifp = XFS_IFORK_PTR(ip, whichfork);
1289 exntf = (whichfork != XFS_DATA_FORK) ? XFS_EXTFMT_NOSTATE :
1290 XFS_EXTFMT_INODE(ip);
1291 block = ifp->if_broot;
1293 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
1295 level = be16_to_cpu(block->bb_level);
1297 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
1298 bno = be64_to_cpu(*pp);
1301 * Go down the tree until leaf level is reached, following the first
1302 * pointer (leftmost) at each level.
1304 while (level-- > 0) {
1305 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
1306 XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
1309 block = XFS_BUF_TO_BLOCK(bp);
1312 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
1313 bno = be64_to_cpu(*pp);
1314 XFS_WANT_CORRUPTED_GOTO(mp,
1315 XFS_FSB_SANITY_CHECK(mp, bno), error0);
1316 xfs_trans_brelse(tp, bp);
1319 * Here with bp and block set to the leftmost leaf node in the tree.
1321 room = xfs_iext_count(ifp);
1324 * Loop over all leaf nodes. Copy information to the extent records.
1327 xfs_bmbt_rec_t *frp;
1328 xfs_fsblock_t nextbno;
1329 xfs_extnum_t num_recs;
1332 num_recs = xfs_btree_get_numrecs(block);
1333 if (unlikely(i + num_recs > room)) {
1334 ASSERT(i + num_recs <= room);
1335 xfs_warn(ip->i_mount,
1336 "corrupt dinode %Lu, (btree extents).",
1337 (unsigned long long) ip->i_ino);
1338 XFS_CORRUPTION_ERROR("xfs_bmap_read_extents(1)",
1339 XFS_ERRLEVEL_LOW, ip->i_mount, block);
1343 * Read-ahead the next leaf block, if any.
1345 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
1346 if (nextbno != NULLFSBLOCK)
1347 xfs_btree_reada_bufl(mp, nextbno, 1,
1350 * Copy records into the extent records.
1352 frp = XFS_BMBT_REC_ADDR(mp, block, 1);
1354 for (j = 0; j < num_recs; j++, i++, frp++) {
1355 xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
1356 trp->l0 = be64_to_cpu(frp->l0);
1357 trp->l1 = be64_to_cpu(frp->l1);
1359 if (exntf == XFS_EXTFMT_NOSTATE) {
1361 * Check all attribute bmap btree records and
1362 * any "older" data bmap btree records for a
1363 * set bit in the "extent flag" position.
1365 if (unlikely(xfs_check_nostate_extents(ifp,
1366 start, num_recs))) {
1367 XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
1373 xfs_trans_brelse(tp, bp);
1376 * If we've reached the end, stop.
1378 if (bno == NULLFSBLOCK)
1380 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
1381 XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
1384 block = XFS_BUF_TO_BLOCK(bp);
1386 if (i != XFS_IFORK_NEXTENTS(ip, whichfork))
1387 return -EFSCORRUPTED;
1388 ASSERT(i == xfs_iext_count(ifp));
1389 XFS_BMAP_TRACE_EXLIST(ip, i, whichfork);
1392 xfs_trans_brelse(tp, bp);
1393 return -EFSCORRUPTED;
1398 * Search the extent records for the entry containing block bno.
1399 * If bno lies in a hole, point to the next entry. If bno lies
1400 * past eof, *eofp will be set, and *prevp will contain the last
1401 * entry (null if none). Else, *lastxp will be set to the index
1402 * of the found entry; *gotp will contain the entry.
1404 STATIC xfs_bmbt_rec_host_t * /* pointer to found extent entry */
1405 xfs_bmap_search_multi_extents(
1406 xfs_ifork_t *ifp, /* inode fork pointer */
1407 xfs_fileoff_t bno, /* block number searched for */
1408 int *eofp, /* out: end of file found */
1409 xfs_extnum_t *lastxp, /* out: last extent index */
1410 xfs_bmbt_irec_t *gotp, /* out: extent entry found */
1411 xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */
1413 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
1414 xfs_extnum_t lastx; /* last extent index */
1417 * Initialize the extent entry structure to catch access to
1418 * uninitialized br_startblock field.
1420 gotp->br_startoff = 0xffa5a5a5a5a5a5a5LL;
1421 gotp->br_blockcount = 0xa55a5a5a5a5a5a5aLL;
1422 gotp->br_state = XFS_EXT_INVALID;
1423 gotp->br_startblock = 0xffffa5a5a5a5a5a5LL;
1424 prevp->br_startoff = NULLFILEOFF;
1426 ep = xfs_iext_bno_to_ext(ifp, bno, &lastx);
1428 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx - 1), prevp);
1430 if (lastx < xfs_iext_count(ifp)) {
1431 xfs_bmbt_get_all(ep, gotp);
1445 * Search the extents list for the inode, for the extent containing bno.
1446 * If bno lies in a hole, point to the next entry. If bno lies past eof,
1447 * *eofp will be set, and *prevp will contain the last entry (null if none).
1448 * Else, *lastxp will be set to the index of the found
1449 * entry; *gotp will contain the entry.
1451 xfs_bmbt_rec_host_t * /* pointer to found extent entry */
1452 xfs_bmap_search_extents(
1453 xfs_inode_t *ip, /* incore inode pointer */
1454 xfs_fileoff_t bno, /* block number searched for */
1455 int fork, /* data or attr fork */
1456 int *eofp, /* out: end of file found */
1457 xfs_extnum_t *lastxp, /* out: last extent index */
1458 xfs_bmbt_irec_t *gotp, /* out: extent entry found */
1459 xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */
1461 xfs_ifork_t *ifp; /* inode fork pointer */
1462 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
1464 XFS_STATS_INC(ip->i_mount, xs_look_exlist);
1465 ifp = XFS_IFORK_PTR(ip, fork);
1467 ep = xfs_bmap_search_multi_extents(ifp, bno, eofp, lastxp, gotp, prevp);
1469 if (unlikely(!(gotp->br_startblock) && (*lastxp != NULLEXTNUM) &&
1470 !(XFS_IS_REALTIME_INODE(ip) && fork == XFS_DATA_FORK))) {
1471 xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
1472 "Access to block zero in inode %llu "
1473 "start_block: %llx start_off: %llx "
1474 "blkcnt: %llx extent-state: %x lastx: %x",
1475 (unsigned long long)ip->i_ino,
1476 (unsigned long long)gotp->br_startblock,
1477 (unsigned long long)gotp->br_startoff,
1478 (unsigned long long)gotp->br_blockcount,
1479 gotp->br_state, *lastxp);
1480 *lastxp = NULLEXTNUM;
1488 * Returns the file-relative block number of the first unused block(s)
1489 * in the file with at least "len" logically contiguous blocks free.
1490 * This is the lowest-address hole if the file has holes, else the first block
1491 * past the end of file.
1492 * Return 0 if the file is currently local (in-inode).
1495 xfs_bmap_first_unused(
1496 xfs_trans_t *tp, /* transaction pointer */
1497 xfs_inode_t *ip, /* incore inode */
1498 xfs_extlen_t len, /* size of hole to find */
1499 xfs_fileoff_t *first_unused, /* unused block */
1500 int whichfork) /* data or attr fork */
1502 int error; /* error return value */
1503 int idx; /* extent record index */
1504 xfs_ifork_t *ifp; /* inode fork pointer */
1505 xfs_fileoff_t lastaddr; /* last block number seen */
1506 xfs_fileoff_t lowest; /* lowest useful block */
1507 xfs_fileoff_t max; /* starting useful block */
1508 xfs_fileoff_t off; /* offset for this block */
1509 xfs_extnum_t nextents; /* number of extent entries */
1511 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
1512 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
1513 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
1514 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
1518 ifp = XFS_IFORK_PTR(ip, whichfork);
1519 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
1520 (error = xfs_iread_extents(tp, ip, whichfork)))
1522 lowest = *first_unused;
1523 nextents = xfs_iext_count(ifp);
1524 for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) {
1525 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
1526 off = xfs_bmbt_get_startoff(ep);
1528 * See if the hole before this extent will work.
1530 if (off >= lowest + len && off - max >= len) {
1531 *first_unused = max;
1534 lastaddr = off + xfs_bmbt_get_blockcount(ep);
1535 max = XFS_FILEOFF_MAX(lastaddr, lowest);
1537 *first_unused = max;
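/*
 * Sketch (not part of the original file): the same first-fit scan over a
 * plain array of (startoff, blockcount) pairs, with the hypothetical
 * struct below standing in for the incore extent records.  A hole is
 * usable when it starts at or beyond "lowest" and leaves at least "len"
 * blocks before the next extent begins.
 */
struct example_extent {
	unsigned long long	startoff;
	unsigned long long	blockcount;
};

static inline unsigned long long
example_first_unused(
	const struct example_extent	*ext,
	int				nextents,
	unsigned long long		lowest,
	unsigned long long		len)
{
	unsigned long long		lastaddr = 0;
	unsigned long long		max = lowest;
	int				idx;

	for (idx = 0; idx < nextents; idx++) {
		unsigned long long	off = ext[idx].startoff;

		/* does the hole before this extent fit the request? */
		if (off >= lowest + len && off - max >= len)
			return max;
		lastaddr = off + ext[idx].blockcount;
		max = lastaddr > lowest ? lastaddr : lowest;
	}
	return max;	/* first block past the last extent */
}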
1542 * Returns the file-relative block number of the last block - 1 before
1543 * last_block (input value) in the file.
1544 * This is not based on i_size, it is based on the extent records.
1545 * Returns 0 for local files, as they do not have extent records.
1548 xfs_bmap_last_before(
1549 xfs_trans_t *tp, /* transaction pointer */
1550 xfs_inode_t *ip, /* incore inode */
1551 xfs_fileoff_t *last_block, /* last block */
1552 int whichfork) /* data or attr fork */
1554 xfs_fileoff_t bno; /* input file offset */
1555 int eof; /* hit end of file */
1556 xfs_bmbt_rec_host_t *ep; /* pointer to last extent */
1557 int error; /* error return value */
1558 xfs_bmbt_irec_t got; /* current extent value */
1559 xfs_ifork_t *ifp; /* inode fork pointer */
1560 xfs_extnum_t lastx; /* last extent used */
1561 xfs_bmbt_irec_t prev; /* previous extent value */
1563 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
1564 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
1565 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
1567 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
1571 ifp = XFS_IFORK_PTR(ip, whichfork);
1572 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
1573 (error = xfs_iread_extents(tp, ip, whichfork)))
1575 bno = *last_block - 1;
1576 ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
1578 if (eof || xfs_bmbt_get_startoff(ep) > bno) {
1579 if (prev.br_startoff == NULLFILEOFF)
1582 *last_block = prev.br_startoff + prev.br_blockcount;
1585 * Otherwise *last_block is already the right answer.
1591 xfs_bmap_last_extent(
1592 struct xfs_trans *tp,
1593 struct xfs_inode *ip,
1595 struct xfs_bmbt_irec *rec,
1598 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
1602 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1603 error = xfs_iread_extents(tp, ip, whichfork);
1608 nextents = xfs_iext_count(ifp);
1609 if (nextents == 0) {
1614 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, nextents - 1), rec);
1620 * Check the last inode extent to determine whether this allocation will result
1621 * in blocks being allocated at the end of the file. When we allocate new data
1622 * blocks at the end of the file which do not start at the previous data block,
1623 * we will try to align the new blocks at stripe unit boundaries.
1625 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
1626 * at, or past, the EOF.
1630 struct xfs_bmalloca *bma,
1633 struct xfs_bmbt_irec rec;
1638 error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
1649 * Check if we are allocating at or past the last extent, or at least into
1650 * the last delayed allocated extent.
1652 bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
1653 (bma->offset >= rec.br_startoff &&
1654 isnullstartblock(rec.br_startblock));
1659 * Returns the file-relative block number of the first block past eof in
1660 * the file. This is not based on i_size, it is based on the extent records.
1661 * Returns 0 for local files, as they do not have extent records.
1664 xfs_bmap_last_offset(
1665 struct xfs_inode *ip,
1666 xfs_fileoff_t *last_block,
1669 struct xfs_bmbt_irec rec;
1675 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
1678 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
1679 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
1682 error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
1683 if (error || is_empty)
1686 *last_block = rec.br_startoff + rec.br_blockcount;
1691 * Returns whether the selected fork of the inode has exactly one
1692 * block or not. For the data fork we check this matches di_size,
1693 * implying the file's range is 0..bsize-1.
1695 int /* 1=>1 block, 0=>otherwise */
1697 xfs_inode_t *ip, /* incore inode */
1698 int whichfork) /* data or attr fork */
1700 xfs_bmbt_rec_host_t *ep; /* ptr to fork's extent */
1701 xfs_ifork_t *ifp; /* inode fork pointer */
1702 int rval; /* return value */
1703 xfs_bmbt_irec_t s; /* internal version of extent */
1706 if (whichfork == XFS_DATA_FORK)
1707 return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
1709 if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
1711 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
1713 ifp = XFS_IFORK_PTR(ip, whichfork);
1714 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
1715 ep = xfs_iext_get_ext(ifp, 0);
1716 xfs_bmbt_get_all(ep, &s);
1717 rval = s.br_startoff == 0 && s.br_blockcount == 1;
1718 if (rval && whichfork == XFS_DATA_FORK)
1719 ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
1724 * Extent tree manipulation functions used during allocation.
1728 * Convert a delayed allocation to a real allocation.
1730 STATIC int /* error */
1731 xfs_bmap_add_extent_delay_real(
1732 struct xfs_bmalloca *bma,
1735 struct xfs_bmbt_irec *new = &bma->got;
1736 int diff; /* temp value */
1737 xfs_bmbt_rec_host_t *ep; /* extent entry for idx */
1738 int error; /* error return value */
1739 int i; /* temp state */
1740 xfs_ifork_t *ifp; /* inode fork pointer */
1741 xfs_fileoff_t new_endoff; /* end offset of new entry */
1742 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
1743 /* left is 0, right is 1, prev is 2 */
1744 int rval=0; /* return value (logging flags) */
1745 int state = 0; /* state bits, accessed thru macros */
1746 xfs_filblks_t da_new; /* new count del alloc blocks used */
1747 xfs_filblks_t da_old; /* old count del alloc blocks used */
1748 xfs_filblks_t temp=0; /* value for da_new calculations */
1749 xfs_filblks_t temp2=0;/* value for da_new calculations */
1750 int tmp_rval; /* partial logging flags */
1751 struct xfs_mount *mp;
1752 xfs_extnum_t *nextents;
1754 mp = bma->ip->i_mount;
1755 ifp = XFS_IFORK_PTR(bma->ip, whichfork);
1756 ASSERT(whichfork != XFS_ATTR_FORK);
1757 nextents = (whichfork == XFS_COW_FORK ? &bma->ip->i_cnextents :
1758 &bma->ip->i_d.di_nextents);
1760 ASSERT(bma->idx >= 0);
1761 ASSERT(bma->idx <= xfs_iext_count(ifp));
1762 ASSERT(!isnullstartblock(new->br_startblock));
1764 (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
1766 XFS_STATS_INC(mp, xs_add_exlist);
1772 if (whichfork == XFS_COW_FORK)
1773 state |= BMAP_COWFORK;
1776 * Set up a bunch of variables to make the tests simpler.
1778 ep = xfs_iext_get_ext(ifp, bma->idx);
1779 xfs_bmbt_get_all(ep, &PREV);
1780 new_endoff = new->br_startoff + new->br_blockcount;
1781 ASSERT(PREV.br_startoff <= new->br_startoff);
1782 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
1784 da_old = startblockval(PREV.br_startblock);
1788 * Set flags determining what part of the previous delayed allocation
1789 * extent is being replaced by a real allocation.
1791 if (PREV.br_startoff == new->br_startoff)
1792 state |= BMAP_LEFT_FILLING;
1793 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
1794 state |= BMAP_RIGHT_FILLING;
1797 * Check and set flags if this segment has a left neighbor.
1798 * Don't set contiguous if the combined extent would be too large.
1801 state |= BMAP_LEFT_VALID;
1802 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &LEFT);
1804 if (isnullstartblock(LEFT.br_startblock))
1805 state |= BMAP_LEFT_DELAY;
1808 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
1809 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
1810 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
1811 LEFT.br_state == new->br_state &&
1812 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
1813 state |= BMAP_LEFT_CONTIG;
1816 * Check and set flags if this segment has a right neighbor.
1817 * Don't set contiguous if the combined extent would be too large.
1818 * Also check for all-three-contiguous being too large.
1820 if (bma->idx < xfs_iext_count(ifp) - 1) {
1821 state |= BMAP_RIGHT_VALID;
1822 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT);
1824 if (isnullstartblock(RIGHT.br_startblock))
1825 state |= BMAP_RIGHT_DELAY;
1828 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
1829 new_endoff == RIGHT.br_startoff &&
1830 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
1831 new->br_state == RIGHT.br_state &&
1832 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
1833 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1834 BMAP_RIGHT_FILLING)) !=
1835 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1836 BMAP_RIGHT_FILLING) ||
1837 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
1839 state |= BMAP_RIGHT_CONTIG;
1843 * Switch out based on the FILLING and CONTIG state bits.
1845 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1846 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
1847 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1848 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1850 * Filling in all of a previously delayed allocation extent.
1851 * The left and right neighbors are both contiguous with new.
1854 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1855 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
1856 LEFT.br_blockcount + PREV.br_blockcount +
1857 RIGHT.br_blockcount);
1858 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1860 xfs_iext_remove(bma->ip, bma->idx + 1, 2, state);
1862 if (bma->cur == NULL)
1863 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1865 rval = XFS_ILOG_CORE;
1866 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
1867 RIGHT.br_startblock,
1868 RIGHT.br_blockcount, &i);
1871 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1872 error = xfs_btree_delete(bma->cur, &i);
1875 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1876 error = xfs_btree_decrement(bma->cur, 0, &i);
1879 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1880 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
1882 LEFT.br_blockcount +
1883 PREV.br_blockcount +
1884 RIGHT.br_blockcount, LEFT.br_state);
1890 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1892 * Filling in all of a previously delayed allocation extent.
1893 * The left neighbor is contiguous, the right is not.
1897 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1898 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
1899 LEFT.br_blockcount + PREV.br_blockcount);
1900 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1902 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
1903 if (bma->cur == NULL)
1904 rval = XFS_ILOG_DEXT;
1907 error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
1908 LEFT.br_startblock, LEFT.br_blockcount,
1912 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1913 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
1915 LEFT.br_blockcount +
1916 PREV.br_blockcount, LEFT.br_state);
1922 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1924 * Filling in all of a previously delayed allocation extent.
1925 * The right neighbor is contiguous, the left is not.
1927 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1928 xfs_bmbt_set_startblock(ep, new->br_startblock);
1929 xfs_bmbt_set_blockcount(ep,
1930 PREV.br_blockcount + RIGHT.br_blockcount);
1931 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1933 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
1934 if (bma->cur == NULL)
1935 rval = XFS_ILOG_DEXT;
1938 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
1939 RIGHT.br_startblock,
1940 RIGHT.br_blockcount, &i);
1943 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1944 error = xfs_bmbt_update(bma->cur, PREV.br_startoff,
1946 PREV.br_blockcount +
1947 RIGHT.br_blockcount, PREV.br_state);
1953 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
1955 * Filling in all of a previously delayed allocation extent.
1956 * Neither the left nor right neighbors are contiguous with
1959 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1960 xfs_bmbt_set_startblock(ep, new->br_startblock);
1961 xfs_bmbt_set_state(ep, new->br_state);
1962 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1965 if (bma->cur == NULL)
1966 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1968 rval = XFS_ILOG_CORE;
1969 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
1970 new->br_startblock, new->br_blockcount,
1974 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
1975 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
1976 error = xfs_btree_insert(bma->cur, &i);
1979 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1983 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
1985 * Filling in the first part of a previous delayed allocation.
1986 * The left neighbor is contiguous.
1988 trace_xfs_bmap_pre_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
1989 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx - 1),
1990 LEFT.br_blockcount + new->br_blockcount);
1991 xfs_bmbt_set_startoff(ep,
1992 PREV.br_startoff + new->br_blockcount);
1993 trace_xfs_bmap_post_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
1995 temp = PREV.br_blockcount - new->br_blockcount;
1996 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1997 xfs_bmbt_set_blockcount(ep, temp);
1998 if (bma->cur == NULL)
1999 rval = XFS_ILOG_DEXT;
2002 error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
2003 LEFT.br_startblock, LEFT.br_blockcount,
2007 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2008 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
2010 LEFT.br_blockcount +
2016 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
2017 startblockval(PREV.br_startblock));
2018 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
2019 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
2024 case BMAP_LEFT_FILLING:
2026 * Filling in the first part of a previous delayed allocation.
2027 * The left neighbor is not contiguous.
2029 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
2030 xfs_bmbt_set_startoff(ep, new_endoff);
2031 temp = PREV.br_blockcount - new->br_blockcount;
2032 xfs_bmbt_set_blockcount(ep, temp);
2033 xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					bma->firstblock, bma->dfops,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));
		ep = xfs_iext_get_ext(ifp, bma->idx + 1);
		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
		trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
		break;
2068 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2070 * Filling in the last part of a previous delayed allocation.
2071 * The right neighbor is contiguous with the new allocation.
2073 temp = PREV.br_blockcount - new->br_blockcount;
2074 trace_xfs_bmap_pre_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
2075 xfs_bmbt_set_blockcount(ep, temp);
2076 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx + 1),
2077 new->br_startoff, new->br_startblock,
2078 new->br_blockcount + RIGHT.br_blockcount,
2080 trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
2081 if (bma->cur == NULL)
2082 rval = XFS_ILOG_DEXT;
2085 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
2086 RIGHT.br_startblock,
2087 RIGHT.br_blockcount, &i);
2090 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2091 error = xfs_bmbt_update(bma->cur, new->br_startoff,
2093 new->br_blockcount +
2094 RIGHT.br_blockcount,
2100 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
2101 startblockval(PREV.br_startblock));
2102 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
2103 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
2104 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
2109 case BMAP_RIGHT_FILLING:
2111 * Filling in the last part of a previous delayed allocation.
2112 * The right neighbor is not contiguous.
2114 temp = PREV.br_blockcount - new->br_blockcount;
2115 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
2116 xfs_bmbt_set_blockcount(ep, temp);
2117 xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state);
2119 if (bma->cur == NULL)
2120 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2122 rval = XFS_ILOG_CORE;
2123 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
2124 new->br_startblock, new->br_blockcount,
2128 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2129 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
2130 error = xfs_btree_insert(bma->cur, &i);
2133 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2136 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
2137 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
2138 bma->firstblock, bma->dfops, &bma->cur, 1,
2139 &tmp_rval, whichfork);
2144 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
2145 startblockval(PREV.br_startblock) -
2146 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
2147 ep = xfs_iext_get_ext(ifp, bma->idx);
2148 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
2149 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
	case 0:
		/*
		 * Filling in the middle part of a previous delayed allocation.
		 * Contiguity is impossible here.
		 * This case is avoided almost all the time.
2160 * We start with a delayed allocation:
2162 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
2165 * and we are allocating:
2166 * +rrrrrrrrrrrrrrrrr+
2169 * and we set it up for insertion as:
2170 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
2172 * PREV @ idx LEFT RIGHT
2173 * inserted at idx + 1
2175 temp = new->br_startoff - PREV.br_startoff;
2176 temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
2177 trace_xfs_bmap_pre_update(bma->ip, bma->idx, 0, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);	/* truncate PREV */
		LEFT = *new;
		RIGHT.br_state = PREV.br_state;
2181 RIGHT.br_startblock = nullstartblock(
2182 (int)xfs_bmap_worst_indlen(bma->ip, temp2));
2183 RIGHT.br_startoff = new_endoff;
2184 RIGHT.br_blockcount = temp2;
2185 /* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */
2186 xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state);
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					bma->firstblock, bma->dfops, &bma->cur,
					1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}
2213 temp = xfs_bmap_worst_indlen(bma->ip, temp);
2214 temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
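		/*
		 * temp and temp2 are now the worst-case indirect block
		 * reservations for the two delalloc pieces that remain on
		 * either side of the real allocation.  If together they need
		 * more than what is left of the original reservation, the
		 * shortfall is taken out of the global free-block pool below.
		 */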
		diff = (int)(temp + temp2 -
			     (startblockval(PREV.br_startblock) -
			      (bma->cur ?
			       bma->cur->bc_private.b.allocated : 0)));
		if (diff > 0) {
			error = xfs_mod_fdblocks(bma->ip->i_mount,
						 -((int64_t)diff), false);
			ASSERT(!error);
			if (error)
				goto done;
		}
2227 ep = xfs_iext_get_ext(ifp, bma->idx);
2228 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
2229 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
2230 trace_xfs_bmap_pre_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
2231 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, bma->idx + 2),
2232 nullstartblock((int)temp2));
2233 trace_xfs_bmap_post_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
		da_new = temp + temp2;
		break;
2239 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2240 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2241 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2242 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2243 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2244 case BMAP_LEFT_CONTIG:
2245 case BMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible.
		 */
		ASSERT(0);
	}
2252 /* add reverse mapping */
	error = xfs_rmap_map_extent(mp, bma->dfops, bma->ip, whichfork, new);
	if (error)
		goto done;
2257 /* convert to a btree if necessary */
2258 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
2259 int tmp_logflags; /* partial log flag return val */
2261 ASSERT(bma->cur == NULL);
2262 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
2263 bma->firstblock, bma->dfops, &bma->cur,
				da_old > 0, &tmp_logflags, whichfork);
		bma->logflags |= tmp_logflags;
		if (error)
			goto done;
	}
2270 /* adjust for changes in reserved delayed indirect blocks */
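	/*
	 * da_old is the number of blocks reserved for indirect (bmbt) blocks
	 * when the delayed allocation was created; da_new is what the
	 * remaining delalloc extents still require.  Any surplus goes back to
	 * the free-block pool here.
	 */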
	if (da_old || da_new) {
		temp = da_new;
		if (bma->cur)
			temp += bma->cur->bc_private.b.allocated;
		if (temp < da_old)
			xfs_mod_fdblocks(bma->ip->i_mount,
					(int64_t)(da_old - temp), false);
	}

	/* clear out the allocated field, done with it now in any case. */
	if (bma->cur)
		bma->cur->bc_private.b.allocated = 0;

	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
done:
	if (whichfork != XFS_COW_FORK)
		bma->logflags |= rval;
	return error;
}
/*
 * Convert an unwritten allocation to a real allocation or vice versa.
 */
STATIC int				/* error */
xfs_bmap_add_extent_unwritten_real(
	struct xfs_trans	*tp,
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork,
	xfs_extnum_t		*idx,	/* extent number to update/insert */
2303 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
2304 xfs_bmbt_irec_t *new, /* new data to add to file extents */
2305 xfs_fsblock_t *first, /* pointer to firstblock variable */
2306 struct xfs_defer_ops *dfops, /* list of extents to be freed */
	int			*logflagsp) /* inode logging flags */
{
	xfs_btree_cur_t		*cur;	/* btree cursor */
2310 xfs_bmbt_rec_host_t *ep; /* extent entry for idx */
2311 int error; /* error return value */
2312 int i; /* temp state */
2313 xfs_ifork_t *ifp; /* inode fork pointer */
2314 xfs_fileoff_t new_endoff; /* end offset of new entry */
2315 xfs_exntst_t newext; /* new extent state */
2316 xfs_exntst_t oldext; /* old extent state */
2317 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
2318 /* left is 0, right is 1, prev is 2 */
2319 int rval=0; /* return value (logging flags) */
2320 int state = 0;/* state bits, accessed thru macros */
2321 struct xfs_mount *mp = ip->i_mount;
2326 ifp = XFS_IFORK_PTR(ip, whichfork);
2327 if (whichfork == XFS_COW_FORK)
2328 state |= BMAP_COWFORK;
2331 ASSERT(*idx <= xfs_iext_count(ifp));
2332 ASSERT(!isnullstartblock(new->br_startblock));
2334 XFS_STATS_INC(mp, xs_add_exlist);
2341 * Set up a bunch of variables to make the tests simpler.
2344 ep = xfs_iext_get_ext(ifp, *idx);
2345 xfs_bmbt_get_all(ep, &PREV);
2346 newext = new->br_state;
2347 oldext = (newext == XFS_EXT_UNWRITTEN) ?
2348 XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
2349 ASSERT(PREV.br_state == oldext);
2350 new_endoff = new->br_startoff + new->br_blockcount;
2351 ASSERT(PREV.br_startoff <= new->br_startoff);
2352 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
2355 * Set flags determining what part of the previous oldext allocation
2356 * extent is being replaced by a newext allocation.
2358 if (PREV.br_startoff == new->br_startoff)
2359 state |= BMAP_LEFT_FILLING;
2360 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
2361 state |= BMAP_RIGHT_FILLING;
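	/*
	 * LEFT_FILLING/RIGHT_FILLING say whether the converted range starts
	 * and/or ends exactly on the boundaries of PREV; together with the
	 * two CONTIG bits below they select one arm of the switch statement.
	 */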
	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (*idx > 0) {
		state |= BMAP_LEFT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT);

		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}
2375 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2376 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
2377 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
2378 LEFT.br_state == newext &&
2379 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2380 state |= BMAP_LEFT_CONTIG;
	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (*idx < xfs_iext_count(ifp) - 1) {
		state |= BMAP_RIGHT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}
2394 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2395 new_endoff == RIGHT.br_startoff &&
2396 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
2397 newext == RIGHT.br_state &&
2398 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
2399 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2400 BMAP_RIGHT_FILLING)) !=
2401 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2402 BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;
2408 * Switch out based on the FILLING and CONTIG state bits.
2410 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2411 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
2412 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2413 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2415 * Setting all of a previous oldext extent to newext.
2416 * The left and right neighbors are both contiguous with new.
2420 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2421 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
2422 LEFT.br_blockcount + PREV.br_blockcount +
2423 RIGHT.br_blockcount);
2424 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2426 xfs_iext_remove(ip, *idx + 1, 2, state);
		XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) - 2);
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
				LEFT.br_startblock,
				LEFT.br_blockcount + PREV.br_blockcount +
				RIGHT.br_blockcount, LEFT.br_state)))
				goto done;
		}
		break;
2458 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2460 * Setting all of a previous oldext extent to newext.
2461 * The left neighbor is contiguous, the right is not.
2465 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2466 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
2467 LEFT.br_blockcount + PREV.br_blockcount);
2468 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2470 xfs_iext_remove(ip, *idx + 1, 1, state);
2471 XFS_IFORK_NEXT_SET(ip, whichfork,
2472 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2474 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2476 rval = XFS_ILOG_CORE;
2477 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2478 PREV.br_startblock, PREV.br_blockcount,
2481 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2482 if ((error = xfs_btree_delete(cur, &i)))
2484 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2485 if ((error = xfs_btree_decrement(cur, 0, &i)))
2487 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2488 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
2490 LEFT.br_blockcount + PREV.br_blockcount,
2496 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2498 * Setting all of a previous oldext extent to newext.
2499 * The right neighbor is contiguous, the left is not.
2501 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2502 xfs_bmbt_set_blockcount(ep,
2503 PREV.br_blockcount + RIGHT.br_blockcount);
2504 xfs_bmbt_set_state(ep, newext);
2505 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2506 xfs_iext_remove(ip, *idx + 1, 1, state);
2507 XFS_IFORK_NEXT_SET(ip, whichfork,
2508 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2510 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2512 rval = XFS_ILOG_CORE;
2513 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
2514 RIGHT.br_startblock,
2515 RIGHT.br_blockcount, &i)))
2517 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2518 if ((error = xfs_btree_delete(cur, &i)))
2520 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2521 if ((error = xfs_btree_decrement(cur, 0, &i)))
2523 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2524 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2526 new->br_blockcount + RIGHT.br_blockcount,
2532 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
2534 * Setting all of a previous oldext extent to newext.
2535 * Neither the left nor right neighbors are contiguous with
2538 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2539 xfs_bmbt_set_state(ep, newext);
2540 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2543 rval = XFS_ILOG_DEXT;
2546 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2547 new->br_startblock, new->br_blockcount,
2550 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2551 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2552 new->br_startblock, new->br_blockcount,
2558 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
2560 * Setting the first part of a previous oldext extent to newext.
2561 * The left neighbor is contiguous.
2563 trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_);
2564 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1),
2565 LEFT.br_blockcount + new->br_blockcount);
2566 xfs_bmbt_set_startoff(ep,
2567 PREV.br_startoff + new->br_blockcount);
2568 trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_);
2570 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2571 xfs_bmbt_set_startblock(ep,
2572 new->br_startblock + new->br_blockcount);
2573 xfs_bmbt_set_blockcount(ep,
2574 PREV.br_blockcount - new->br_blockcount);
2575 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2580 rval = XFS_ILOG_DEXT;
2583 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2584 PREV.br_startblock, PREV.br_blockcount,
2587 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2588 if ((error = xfs_bmbt_update(cur,
2589 PREV.br_startoff + new->br_blockcount,
2590 PREV.br_startblock + new->br_blockcount,
2591 PREV.br_blockcount - new->br_blockcount,
2594 if ((error = xfs_btree_decrement(cur, 0, &i)))
2596 error = xfs_bmbt_update(cur, LEFT.br_startoff,
2598 LEFT.br_blockcount + new->br_blockcount,
2605 case BMAP_LEFT_FILLING:
2607 * Setting the first part of a previous oldext extent to newext.
2608 * The left neighbor is not contiguous.
2610 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2611 ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
2612 xfs_bmbt_set_startoff(ep, new_endoff);
2613 xfs_bmbt_set_blockcount(ep,
2614 PREV.br_blockcount - new->br_blockcount);
2615 xfs_bmbt_set_startblock(ep,
2616 new->br_startblock + new->br_blockcount);
2617 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2619 xfs_iext_insert(ip, *idx, 1, new, state);
2620 XFS_IFORK_NEXT_SET(ip, whichfork,
2621 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2623 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2625 rval = XFS_ILOG_CORE;
2626 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2627 PREV.br_startblock, PREV.br_blockcount,
2630 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2631 if ((error = xfs_bmbt_update(cur,
2632 PREV.br_startoff + new->br_blockcount,
2633 PREV.br_startblock + new->br_blockcount,
2634 PREV.br_blockcount - new->br_blockcount,
2637 cur->bc_rec.b = *new;
2638 if ((error = xfs_btree_insert(cur, &i)))
2640 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2644 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2646 * Setting the last part of a previous oldext extent to newext.
2647 * The right neighbor is contiguous with the new allocation.
2649 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2650 xfs_bmbt_set_blockcount(ep,
2651 PREV.br_blockcount - new->br_blockcount);
2652 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2656 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2657 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
2658 new->br_startoff, new->br_startblock,
2659 new->br_blockcount + RIGHT.br_blockcount, newext);
2660 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2663 rval = XFS_ILOG_DEXT;
2666 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2668 PREV.br_blockcount, &i)))
2670 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2671 if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
2673 PREV.br_blockcount - new->br_blockcount,
2676 if ((error = xfs_btree_increment(cur, 0, &i)))
2678 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2680 new->br_blockcount + RIGHT.br_blockcount,
2686 case BMAP_RIGHT_FILLING:
2688 * Setting the last part of a previous oldext extent to newext.
2689 * The right neighbor is not contiguous.
2691 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2692 xfs_bmbt_set_blockcount(ep,
2693 PREV.br_blockcount - new->br_blockcount);
2694 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2697 xfs_iext_insert(ip, *idx, 1, new, state);
2699 XFS_IFORK_NEXT_SET(ip, whichfork,
2700 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2702 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2704 rval = XFS_ILOG_CORE;
2705 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2706 PREV.br_startblock, PREV.br_blockcount,
2709 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2710 if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
2712 PREV.br_blockcount - new->br_blockcount,
2715 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2716 new->br_startblock, new->br_blockcount,
2719 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2720 cur->bc_rec.b.br_state = new->br_state;
2721 if ((error = xfs_btree_insert(cur, &i)))
2723 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2729 * Setting the middle part of a previous oldext extent to
2730 * newext. Contiguity is impossible here.
2731 * One extent becomes three extents.
2733 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2734 xfs_bmbt_set_blockcount(ep,
2735 new->br_startoff - PREV.br_startoff);
2736 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2739 r[1].br_startoff = new_endoff;
2740 r[1].br_blockcount =
2741 PREV.br_startoff + PREV.br_blockcount - new_endoff;
2742 r[1].br_startblock = new->br_startblock + new->br_blockcount;
2743 r[1].br_state = oldext;
2746 xfs_iext_insert(ip, *idx, 2, &r[0], state);
2748 XFS_IFORK_NEXT_SET(ip, whichfork,
2749 XFS_IFORK_NEXTENTS(ip, whichfork) + 2);
2751 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2753 rval = XFS_ILOG_CORE;
2754 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2755 PREV.br_startblock, PREV.br_blockcount,
2758 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2759 /* new right extent - oldext */
2760 if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
2761 r[1].br_startblock, r[1].br_blockcount,
2764 /* new left extent - oldext */
2765 cur->bc_rec.b = PREV;
2766 cur->bc_rec.b.br_blockcount =
2767 new->br_startoff - PREV.br_startoff;
2768 if ((error = xfs_btree_insert(cur, &i)))
2770 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2772 * Reset the cursor to the position of the new extent
2773 * we are about to insert as we can't trust it after
2774 * the previous insert.
2776 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2777 new->br_startblock, new->br_blockcount,
2780 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2781 /* new middle extent - newext */
2782 cur->bc_rec.b.br_state = new->br_state;
2783 if ((error = xfs_btree_insert(cur, &i)))
2785 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2789 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2790 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2791 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2792 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2793 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2794 case BMAP_LEFT_CONTIG:
2795 case BMAP_RIGHT_CONTIG:
2797 * These cases are all impossible.
2802 /* update reverse mappings */
2803 error = xfs_rmap_convert_extent(mp, dfops, ip, whichfork, new);
2807 /* convert to a btree if necessary */
2808 if (xfs_bmap_needs_btree(ip, whichfork)) {
2809 int tmp_logflags; /* partial log flag return val */
2811 ASSERT(cur == NULL);
2812 error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, &cur,
2813 0, &tmp_logflags, whichfork);
2814 *logflagsp |= tmp_logflags;
2819 /* clear out the allocated field, done with it now in any case. */
2821 cur->bc_private.b.allocated = 0;
2825 xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
/*
 * Convert a hole to a delayed allocation.
 */
STATIC void
xfs_bmap_add_extent_hole_delay(
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork,
	xfs_extnum_t		*idx,	/* extent number to update/insert */
2842 xfs_bmbt_irec_t *new) /* new data to add to file extents */
2844 xfs_ifork_t *ifp; /* inode fork pointer */
2845 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2846 xfs_filblks_t newlen=0; /* new indirect size */
2847 xfs_filblks_t oldlen=0; /* old indirect size */
2848 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2849 int state; /* state bits, accessed thru macros */
2850 xfs_filblks_t temp=0; /* temp for indirect calculations */
2852 ifp = XFS_IFORK_PTR(ip, whichfork);
2854 if (whichfork == XFS_COW_FORK)
2855 state |= BMAP_COWFORK;
2856 ASSERT(isnullstartblock(new->br_startblock));
2859 * Check and set flags if this segment has a left neighbor
2862 state |= BMAP_LEFT_VALID;
2863 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);
2865 if (isnullstartblock(left.br_startblock))
2866 state |= BMAP_LEFT_DELAY;
2870 * Check and set flags if the current (right) segment exists.
2871 * If it doesn't exist, we're converting the hole at end-of-file.
2873 if (*idx < xfs_iext_count(ifp)) {
2874 state |= BMAP_RIGHT_VALID;
2875 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
2877 if (isnullstartblock(right.br_startblock))
2878 state |= BMAP_RIGHT_DELAY;
2882 * Set contiguity flags on the left and right neighbors.
2883 * Don't let extents get too large, even if the pieces are contiguous.
2885 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
2886 left.br_startoff + left.br_blockcount == new->br_startoff &&
2887 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2888 state |= BMAP_LEFT_CONTIG;
2890 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
2891 new->br_startoff + new->br_blockcount == right.br_startoff &&
2892 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2893 (!(state & BMAP_LEFT_CONTIG) ||
2894 (left.br_blockcount + new->br_blockcount +
2895 right.br_blockcount <= MAXEXTLEN)))
2896 state |= BMAP_RIGHT_CONTIG;
2899 * Switch out based on the contiguity flags.
2901 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2902 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2904 * New allocation is contiguous with delayed allocations
2905 * on the left and on the right.
2906 * Merge all three into a single extent record.
2909 temp = left.br_blockcount + new->br_blockcount +
2910 right.br_blockcount;
2912 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2913 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
2914 oldlen = startblockval(left.br_startblock) +
2915 startblockval(new->br_startblock) +
2916 startblockval(right.br_startblock);
		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
					 oldlen);
		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
2920 nullstartblock((int)newlen));
2921 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		xfs_iext_remove(ip, *idx + 1, 1, state);
		break;
2926 case BMAP_LEFT_CONTIG:
2928 * New allocation is contiguous with a delayed allocation
2930 * Merge the new allocation with the left neighbor.
2933 temp = left.br_blockcount + new->br_blockcount;
2935 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2936 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
2937 oldlen = startblockval(left.br_startblock) +
2938 startblockval(new->br_startblock);
		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
					 oldlen);
		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
2942 nullstartblock((int)newlen));
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		break;
2946 case BMAP_RIGHT_CONTIG:
2948 * New allocation is contiguous with a delayed allocation
2950 * Merge the new allocation with the right neighbor.
2952 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2953 temp = new->br_blockcount + right.br_blockcount;
2954 oldlen = startblockval(new->br_startblock) +
2955 startblockval(right.br_startblock);
		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
					 oldlen);
		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
			new->br_startoff,
2960 nullstartblock((int)newlen), temp, right.br_state);
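		/*
		 * As with the merge cases above, the combined extent never
		 * needs more worst-case indirect blocks than the separate
		 * pieces did, so newlen <= oldlen and the difference is
		 * handed back to the free pool at the bottom of this
		 * function.
		 */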
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		break;

	default:
		/*
		 * New allocation is not contiguous with another
		 * delayed allocation.
		 * Insert a new entry.
		 */
		oldlen = newlen = 0;
		xfs_iext_insert(ip, *idx, 1, new, state);
		break;
	}
	if (oldlen != newlen) {
		ASSERT(oldlen > newlen);
		xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
				 false);
		/*
		 * Nothing to do for disk quota accounting here.
		 */
	}
}
/*
 * Convert a hole to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_hole_real(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	*new = &bma->got;
2993 int error; /* error return value */
2994 int i; /* temp state */
2995 xfs_ifork_t *ifp; /* inode fork pointer */
2996 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2997 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2998 int rval=0; /* return value (logging flags) */
2999 int state; /* state bits, accessed thru macros */
3000 struct xfs_mount *mp;
3002 mp = bma->ip->i_mount;
3003 ifp = XFS_IFORK_PTR(bma->ip, whichfork);
3005 ASSERT(bma->idx >= 0);
3006 ASSERT(bma->idx <= xfs_iext_count(ifp));
	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur ||
	       !(bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
3010 ASSERT(whichfork != XFS_COW_FORK);
	XFS_STATS_INC(mp, xs_add_exlist);

	state = 0;
	if (whichfork == XFS_ATTR_FORK)
		state |= BMAP_ATTRFORK;
	/*
	 * Check and set flags if this segment has a left neighbor.
	 */
	if (bma->idx > 0) {
		state |= BMAP_LEFT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &left);
		if (isnullstartblock(left.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}
3029 * Check and set flags if this segment has a current value.
3030 * Not true if we're inserting into the "hole" at eof.
3032 if (bma->idx < xfs_iext_count(ifp)) {
3033 state |= BMAP_RIGHT_VALID;
3034 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &right);
3035 if (isnullstartblock(right.br_startblock))
3036 state |= BMAP_RIGHT_DELAY;
3040 * We're inserting a real allocation between "left" and "right".
3041 * Set the contiguity flags. Don't let extents get too large.
3043 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
3044 left.br_startoff + left.br_blockcount == new->br_startoff &&
3045 left.br_startblock + left.br_blockcount == new->br_startblock &&
3046 left.br_state == new->br_state &&
3047 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
3048 state |= BMAP_LEFT_CONTIG;
3050 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
3051 new->br_startoff + new->br_blockcount == right.br_startoff &&
3052 new->br_startblock + new->br_blockcount == right.br_startblock &&
3053 new->br_state == right.br_state &&
3054 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
3055 (!(state & BMAP_LEFT_CONTIG) ||
3056 left.br_blockcount + new->br_blockcount +
3057 right.br_blockcount <= MAXEXTLEN))
3058 state |= BMAP_RIGHT_CONTIG;
3062 * Select which case we're in here, and implement it.
3064 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
3065 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
3067 * New allocation is contiguous with real allocations on the
3068 * left and on the right.
3069 * Merge all three into a single extent record.
3072 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
3073 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
3074 left.br_blockcount + new->br_blockcount +
3075 right.br_blockcount);
3076 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
3078 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
3080 XFS_IFORK_NEXT_SET(bma->ip, whichfork,
3081 XFS_IFORK_NEXTENTS(bma->ip, whichfork) - 1);
3082 if (bma->cur == NULL) {
3083 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
3085 rval = XFS_ILOG_CORE;
3086 error = xfs_bmbt_lookup_eq(bma->cur, right.br_startoff,
3087 right.br_startblock, right.br_blockcount,
3091 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3092 error = xfs_btree_delete(bma->cur, &i);
3095 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3096 error = xfs_btree_decrement(bma->cur, 0, &i);
3099 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3100 error = xfs_bmbt_update(bma->cur, left.br_startoff,
3102 left.br_blockcount +
3103 new->br_blockcount +
3104 right.br_blockcount,
3111 case BMAP_LEFT_CONTIG:
3113 * New allocation is contiguous with a real allocation
3115 * Merge the new allocation with the left neighbor.
3118 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
3119 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
3120 left.br_blockcount + new->br_blockcount);
3121 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
3123 if (bma->cur == NULL) {
3124 rval = xfs_ilog_fext(whichfork);
3127 error = xfs_bmbt_lookup_eq(bma->cur, left.br_startoff,
3128 left.br_startblock, left.br_blockcount,
3132 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3133 error = xfs_bmbt_update(bma->cur, left.br_startoff,
3135 left.br_blockcount +
3143 case BMAP_RIGHT_CONTIG:
3145 * New allocation is contiguous with a real allocation
3147 * Merge the new allocation with the right neighbor.
3149 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
3150 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx),
3151 new->br_startoff, new->br_startblock,
3152 new->br_blockcount + right.br_blockcount,
3154 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
3156 if (bma->cur == NULL) {
3157 rval = xfs_ilog_fext(whichfork);
3160 error = xfs_bmbt_lookup_eq(bma->cur,
3162 right.br_startblock,
3163 right.br_blockcount, &i);
3166 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3167 error = xfs_bmbt_update(bma->cur, new->br_startoff,
3169 new->br_blockcount +
3170 right.br_blockcount,
3179 * New allocation is not contiguous with another
3181 * Insert a new entry.
3183 xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
3184 XFS_IFORK_NEXT_SET(bma->ip, whichfork,
3185 XFS_IFORK_NEXTENTS(bma->ip, whichfork) + 1);
3186 if (bma->cur == NULL) {
3187 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
3189 rval = XFS_ILOG_CORE;
3190 error = xfs_bmbt_lookup_eq(bma->cur,
3193 new->br_blockcount, &i);
3196 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
3197 bma->cur->bc_rec.b.br_state = new->br_state;
3198 error = xfs_btree_insert(bma->cur, &i);
3201 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3206 /* add reverse mapping */
3207 error = xfs_rmap_map_extent(mp, bma->dfops, bma->ip, whichfork, new);
3211 /* convert to a btree if necessary */
3212 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
3213 int tmp_logflags; /* partial log flag return val */
3215 ASSERT(bma->cur == NULL);
3216 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
3217 bma->firstblock, bma->dfops, &bma->cur,
3218 0, &tmp_logflags, whichfork);
3219 bma->logflags |= tmp_logflags;
3224 /* clear out the allocated field, done with it now in any case. */
3226 bma->cur->bc_private.b.allocated = 0;
3228 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
3230 bma->logflags |= rval;
/*
 * Functions used in the extent read, allocate and remove paths
 */

/*
 * Adjust the size of the new extent based on di_extsize and rt extsize.
 */
STATIC int
xfs_bmap_extsize_align(
	xfs_mount_t	*mp,
3244 xfs_bmbt_irec_t *gotp, /* next extent pointer */
3245 xfs_bmbt_irec_t *prevp, /* previous extent pointer */
3246 xfs_extlen_t extsz, /* align to this extent size */
3247 int rt, /* is this a realtime inode? */
3248 int eof, /* is extent at end-of-file? */
3249 int delay, /* creating delalloc extent? */
3250 int convert, /* overwriting unwritten extent? */
3251 xfs_fileoff_t *offp, /* in/out: aligned offset */
3252 xfs_extlen_t *lenp) /* in/out: aligned length */
3254 xfs_fileoff_t orig_off; /* original offset */
3255 xfs_extlen_t orig_alen; /* original length */
3256 xfs_fileoff_t orig_end; /* original off+len */
3257 xfs_fileoff_t nexto; /* next file offset */
3258 xfs_fileoff_t prevo; /* previous file offset */
3259 xfs_fileoff_t align_off; /* temp for offset */
3260 xfs_extlen_t align_alen; /* temp for length */
	xfs_extlen_t	temp;		/* temp for calculations */

	if (convert)
		return 0;

	orig_off = align_off = *offp;
3267 orig_alen = align_alen = *lenp;
3268 orig_end = orig_off + orig_alen;
3271 * If this request overlaps an existing extent, then don't
3272 * attempt to perform any additional alignment.
3274 if (!delay && !eof &&
3275 (orig_off >= gotp->br_startoff) &&
	    (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
		return 0;
	}

	/*
	 * If the file offset is unaligned vs. the extent size
	 * we need to align it.  This will be possible unless
	 * the file was previously written with a kernel that didn't
	 * perform this alignment, or if a truncate shot us in the
	 * foot.
	 */
	temp = do_mod(orig_off, extsz);
	if (temp) {
		align_alen += temp;
		align_off -= temp;
	}

	/* Same adjustment for the end of the requested area. */
	temp = (align_alen % extsz);
	if (temp)
		align_alen += extsz - temp;
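	/*
	 * Worked example (hypothetical numbers): with extsz = 16, a request
	 * at offset 21 for 10 blocks first becomes align_off = 16,
	 * align_alen = 15; rounding the end up then gives align_alen = 16,
	 * i.e. the request now covers blocks 16-31, which contains the
	 * original range 21-30.
	 */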
3299 * For large extent hint sizes, the aligned extent might be larger than
3300 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls
3301 * the length back under MAXEXTLEN. The outer allocation loops handle
3302 * short allocation just fine, so it is safe to do this. We only want to
3303 * do it when we are forced to, though, because it means more allocation
3304 * operations are required.
3306 while (align_alen > MAXEXTLEN)
3307 align_alen -= extsz;
3308 ASSERT(align_alen <= MAXEXTLEN);
	/*
	 * If the previous block overlaps with this proposed allocation
	 * then move the start forward without adjusting the length.
	 */
	if (prevp->br_startoff != NULLFILEOFF) {
		if (prevp->br_startblock == HOLESTARTBLOCK)
			prevo = prevp->br_startoff;
		else
			prevo = prevp->br_startoff + prevp->br_blockcount;
	} else
		prevo = 0;
	if (align_off != orig_off && align_off < prevo)
		align_off = prevo;
	/*
	 * If the next block overlaps with this proposed allocation
	 * then move the start back without adjusting the length,
	 * but not before offset 0.
	 * This may of course make the start overlap previous block,
	 * and if we hit the offset 0 limit then the next block
	 * can still overlap too.
	 */
	if (!eof && gotp->br_startoff != NULLFILEOFF) {
		if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
		    (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
			nexto = gotp->br_startoff + gotp->br_blockcount;
		else
			nexto = gotp->br_startoff;
	} else
		nexto = NULLFILEOFF;
	if (!eof &&
	    align_off + align_alen != orig_end &&
	    align_off + align_alen > nexto)
		align_off = nexto > align_alen ? nexto - align_alen : 0;
	/*
	 * If we're now overlapping the next or previous extent that
	 * means we can't fit an extsz piece in this hole.  Just move
	 * the start forward to the first valid spot and set
	 * the length so we hit the end.
	 */
	if (align_off != orig_off && align_off < prevo)
		align_off = prevo;
	if (align_off + align_alen != orig_end &&
	    align_off + align_alen > nexto &&
	    nexto != NULLFILEOFF) {
		ASSERT(nexto > prevo);
		align_alen = nexto - align_off;
	}
	/*
	 * If realtime, and the result isn't a multiple of the realtime
	 * extent size we need to remove blocks until it is.
	 */
	if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
		/*
		 * We're not covering the original request, or
		 * we won't be able to once we fix the length.
		 */
		if (orig_off < align_off ||
		    orig_end > align_off + align_alen ||
		    align_alen - temp < orig_alen)
			return -EINVAL;
		/*
		 * Try to fix it by moving the start up.
		 */
		if (align_off + temp <= orig_off) {
			align_alen -= temp;
			align_off += temp;
		}
		/*
		 * Try to fix it by moving the end in.
		 */
		else if (align_off + align_alen - temp >= orig_end)
			align_alen -= temp;
		/*
		 * Set the start to the minimum then trim the length.
		 */
		else {
			align_alen -= orig_off - align_off;
			align_off = orig_off;
			align_alen -= align_alen % mp->m_sb.sb_rextsize;
		}
		/*
		 * Result doesn't cover the request, fail it.
		 */
		if (orig_off < align_off || orig_end > align_off + align_alen)
			return -EINVAL;
	}
3397 ASSERT(orig_off >= align_off);
3398 /* see MAXEXTLEN handling above */
3399 ASSERT(orig_end <= align_off + align_alen ||
	       align_alen + extsz > MAXEXTLEN);

#ifdef DEBUG
	if (!eof && gotp->br_startoff != NULLFILEOFF)
		ASSERT(align_off + align_alen <= gotp->br_startoff);
	if (prevp->br_startoff != NULLFILEOFF)
		ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
#endif

	*lenp = align_alen;
	*offp = align_off;
	return 0;
}
3415 #define XFS_ALLOC_GAP_UNITS 4
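/*
 * XFS_ALLOC_GAP_UNITS bounds how large a gap (relative to the allocation
 * length) we will tolerate between the end of the previous extent, or the
 * start of the next one, and the block we pick as the allocation hint below.
 */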
STATIC void
xfs_bmap_adjacent(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
3421 xfs_fsblock_t adjust; /* adjustment to block numbers */
3422 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3423 xfs_mount_t *mp; /* mount point structure */
3424 int nullfb; /* true if ap->firstblock isn't set */
3425 int rt; /* true if inode is realtime */
#define	ISVALID(x,y)	\
	(rt ? \
		(x) < mp->m_sb.sb_rblocks : \
3430 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
3431 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
3432 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
3434 mp = ap->ip->i_mount;
3435 nullfb = *ap->firstblock == NULLFSBLOCK;
3436 rt = XFS_IS_REALTIME_INODE(ap->ip) &&
3437 xfs_alloc_is_userdata(ap->datatype);
3438 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
3440 * If allocating at eof, and there's a previous real block,
3441 * try to use its last block as our starting point.
3443 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
3444 !isnullstartblock(ap->prev.br_startblock) &&
3445 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
3446 ap->prev.br_startblock)) {
3447 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
3449 * Adjust for the gap between prevp and us.
3451 adjust = ap->offset -
3452 (ap->prev.br_startoff + ap->prev.br_blockcount);
		if (adjust &&
		    ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
			ap->blkno += adjust;
	}
	/*
3458 * If not at eof, then compare the two neighbor blocks.
3459 * Figure out whether either one gives us a good starting point,
3460 * and pick the better one.
3462 else if (!ap->eof) {
3463 xfs_fsblock_t gotbno; /* right side block number */
3464 xfs_fsblock_t gotdiff=0; /* right side difference */
3465 xfs_fsblock_t prevbno; /* left side block number */
3466 xfs_fsblock_t prevdiff=0; /* left side difference */
3469 * If there's a previous (left) block, select a requested
3470 * start block based on it.
3472 if (ap->prev.br_startoff != NULLFILEOFF &&
3473 !isnullstartblock(ap->prev.br_startblock) &&
3474 (prevbno = ap->prev.br_startblock +
3475 ap->prev.br_blockcount) &&
3476 ISVALID(prevbno, ap->prev.br_startblock)) {
3478 * Calculate gap to end of previous block.
3480 adjust = prevdiff = ap->offset -
3481 (ap->prev.br_startoff +
3482 ap->prev.br_blockcount);
3484 * Figure the startblock based on the previous block's
3485 * end and the gap size.
3487 * If the gap is large relative to the piece we're
3488 * allocating, or using it gives us an invalid block
3489 * number, then just use the end of the previous block.
3491 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3492 ISVALID(prevbno + prevdiff,
3493 ap->prev.br_startblock))
3498 * If the firstblock forbids it, can't use it,
3501 if (!rt && !nullfb &&
3502 XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
3503 prevbno = NULLFSBLOCK;
3506 * No previous block or can't follow it, just default.
3509 prevbno = NULLFSBLOCK;
3511 * If there's a following (right) block, select a requested
3512 * start block based on it.
3514 if (!isnullstartblock(ap->got.br_startblock)) {
3516 * Calculate gap to start of next block.
3518 adjust = gotdiff = ap->got.br_startoff - ap->offset;
3520 * Figure the startblock based on the next block's
3521 * start and the gap size.
3523 gotbno = ap->got.br_startblock;
3526 * If the gap is large relative to the piece we're
3527 * allocating, or using it gives us an invalid block
3528 * number, then just use the start of the next block
3529 * offset by our length.
3531 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3532 ISVALID(gotbno - gotdiff, gotbno))
3534 else if (ISVALID(gotbno - ap->length, gotbno)) {
3535 gotbno -= ap->length;
3536 gotdiff += adjust - ap->length;
3540 * If the firstblock forbids it, can't use it,
3543 if (!rt && !nullfb &&
3544 XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
3545 gotbno = NULLFSBLOCK;
3548 * No next block, just default.
3551 gotbno = NULLFSBLOCK;
3553 * If both valid, pick the better one, else the only good
3554 * one, else ap->blkno is already set (to 0 or the inode block).
3556 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
3557 ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
3558 else if (prevbno != NULLFSBLOCK)
3559 ap->blkno = prevbno;
3560 else if (gotbno != NULLFSBLOCK)
static int
xfs_bmap_longest_free_extent(
	struct xfs_trans	*tp,
	xfs_agnumber_t		ag,
	xfs_extlen_t		*blen,
	int			*notinit)
{
	struct xfs_mount	*mp = tp->t_mountp;
3574 struct xfs_perag *pag;
3575 xfs_extlen_t longest;
3578 pag = xfs_perag_get(mp, ag);
3579 if (!pag->pagf_init) {
3580 error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
3584 if (!pag->pagf_init) {
3590 longest = xfs_alloc_longest_free_extent(mp, pag,
3591 xfs_alloc_min_freelist(mp, pag),
3592 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
3593 if (*blen < longest)
3602 xfs_bmap_select_minlen(
3603 struct xfs_bmalloca *ap,
3604 struct xfs_alloc_arg *args,
3608 if (notinit || *blen < ap->minlen) {
3610 * Since we did a BUF_TRYLOCK above, it is possible that
3611 * there is space for this request.
3613 args->minlen = ap->minlen;
3614 } else if (*blen < args->maxlen) {
3616 * If the best seen length is less than the request length,
3617 * use the best as the minimum.
3619 args->minlen = *blen;
3622 * Otherwise we've seen an extent as big as maxlen, use that
		args->minlen = args->maxlen;
	}
}

STATIC int
xfs_bmap_btalloc_nullfb(
3631 struct xfs_bmalloca *ap,
3632 struct xfs_alloc_arg *args,
3635 struct xfs_mount *mp = ap->ip->i_mount;
3636 xfs_agnumber_t ag, startag;
3640 args->type = XFS_ALLOCTYPE_START_BNO;
3641 args->total = ap->total;
3643 startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3644 if (startag == NULLAGNUMBER)
3647 while (*blen < args->maxlen) {
3648 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3653 if (++ag == mp->m_sb.sb_agcount)
	xfs_bmap_select_minlen(ap, args, blen, notinit);
	return 0;
}

STATIC int
xfs_bmap_btalloc_filestreams(
3665 struct xfs_bmalloca *ap,
3666 struct xfs_alloc_arg *args,
3669 struct xfs_mount *mp = ap->ip->i_mount;
3674 args->type = XFS_ALLOCTYPE_NEAR_BNO;
3675 args->total = ap->total;
3677 ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3678 if (ag == NULLAGNUMBER)
3681 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, ¬init);
3685 if (*blen < args->maxlen) {
3686 error = xfs_filestream_new_ag(ap, &ag);
3690 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3697 xfs_bmap_select_minlen(ap, args, blen, notinit);
3700 * Set the failure fallback case to look in the selected AG as stream
	 * may have moved.
	 */
	ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);

	return 0;
}

STATIC int
xfs_bmap_btalloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	xfs_mount_t	*mp;		/* mount point structure */
3712 xfs_alloctype_t atype = 0; /* type for allocation routines */
3713 xfs_extlen_t align = 0; /* minimum allocation alignment */
	xfs_agnumber_t	fb_agno;	/* ag number of ap->firstblock */
	xfs_agnumber_t	ag;
	xfs_alloc_arg_t	args;
	xfs_extlen_t	blen;
	xfs_extlen_t	nextminlen = 0;
	int		nullfb;		/* true if ap->firstblock isn't set */
	int		isaligned;
	int		tryagain;
	int		error;
	int		stripe_align;

	ASSERT(ap->length);

	mp = ap->ip->i_mount;
	/* stripe alignment for allocation is determined by mount parameters */
	stripe_align = 0;
	if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
3732 stripe_align = mp->m_swidth;
3733 else if (mp->m_dalign)
3734 stripe_align = mp->m_dalign;
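	/*
	 * With the "swalloc" mount option the allocation above is aligned to
	 * the full stripe width (swidth); otherwise the stripe unit
	 * (sunit/dalign) is used as the alignment.
	 */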
3736 if (ap->flags & XFS_BMAPI_COWFORK)
3737 align = xfs_get_cowextsz_hint(ap->ip);
3738 else if (xfs_alloc_is_userdata(ap->datatype))
		align = xfs_get_extsz_hint(ap->ip);
	if (unlikely(align)) {
		error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
						align, 0, ap->eof, 0, ap->conv,
						&ap->offset, &ap->length);
		ASSERT(!error);
		ASSERT(ap->length);
	}

	nullfb = *ap->firstblock == NULLFSBLOCK;
	fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
	if (nullfb) {
		if (xfs_alloc_is_userdata(ap->datatype) &&
		    xfs_inode_is_filestream(ap->ip)) {
			ag = xfs_filestream_lookup_ag(ap->ip);
			ag = (ag != NULLAGNUMBER) ? ag : 0;
			ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
		} else {
			ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
		}
	} else
		ap->blkno = *ap->firstblock;
3763 xfs_bmap_adjacent(ap);
	/*
	 * If allowed, use ap->blkno; otherwise must use firstblock since
	 * it's in the right allocation group.
	 */
	if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno)
		;
	else
		ap->blkno = *ap->firstblock;
	/*
	 * Normal allocation, done through xfs_alloc_vextent.
	 */
	tryagain = isaligned = 0;
	memset(&args, 0, sizeof(args));
	args.tp = ap->tp;
	args.mp = mp;
	args.fsbno = ap->blkno;
3781 xfs_rmap_skip_owner_update(&args.oinfo);
3783 /* Trim the allocation back to the maximum an AG can fit. */
3784 args.maxlen = MIN(ap->length, mp->m_ag_max_usable);
	args.firstblock = *ap->firstblock;
	blen = 0;
	if (nullfb) {
		/*
		 * Search for an allocation group with a single extent large
3790 * enough for the request. If one isn't found, then adjust
3791 * the minimum allocation size to the largest space found.
3793 if (xfs_alloc_is_userdata(ap->datatype) &&
3794 xfs_inode_is_filestream(ap->ip))
3795 error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
		else
			error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
		if (error)
			return error;
3800 } else if (ap->dfops->dop_low) {
3801 if (xfs_inode_is_filestream(ap->ip))
3802 args.type = XFS_ALLOCTYPE_FIRST_AG;
3804 args.type = XFS_ALLOCTYPE_START_BNO;
		args.total = args.minlen = ap->minlen;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
3808 args.total = ap->total;
		args.minlen = ap->minlen;
	}
	/* apply extent size hints if obtained earlier */
	if (align) {
		args.prod = align;
		if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
3815 args.mod = (xfs_extlen_t)(args.prod - args.mod);
	} else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
		args.prod = 1;
		args.mod = 0;
	} else {
		args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
3821 if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod))))
			args.mod = (xfs_extlen_t)(args.prod - args.mod);
	}
	/*
	 * If we are not low on available data blocks, and the
3826 * underlying logical volume manager is a stripe, and
3827 * the file offset is zero then try to allocate data
3828 * blocks on stripe unit boundary.
3829 * NOTE: ap->aeof is only set if the allocation length
3830 * is >= the stripe unit and the allocation offset is
3831 * at the end of file.
3833 if (!ap->dfops->dop_low && ap->aeof) {
3835 args.alignment = stripe_align;
3839 * Adjust for alignment
3841 if (blen > args.alignment && blen <= args.maxlen)
3842 args.minlen = blen - args.alignment;
3843 args.minalignslop = 0;
3846 * First try an exact bno allocation.
3847 * If it fails then do a near or start bno
3848 * allocation with alignment turned on.
3852 args.type = XFS_ALLOCTYPE_THIS_BNO;
3855 * Compute the minlen+alignment for the
3856 * next case. Set slop so that the value
3857 * of minlen+alignment+slop doesn't go up
3858 * between the calls.
3860 if (blen > stripe_align && blen <= args.maxlen)
3861 nextminlen = blen - stripe_align;
3863 nextminlen = args.minlen;
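			/*
			 * In other words: pad the exact-bno attempt with
			 * enough slop that minlen + alignment + slop is at
			 * least what the aligned retry below will ask for,
			 * so the space reserved for the first attempt always
			 * covers the second.
			 */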
3864 if (nextminlen + stripe_align > args.minlen + 1)
3866 nextminlen + stripe_align -
3869 args.minalignslop = 0;
3873 args.minalignslop = 0;
3875 args.minleft = ap->minleft;
3876 args.wasdel = ap->wasdel;
3877 args.resv = XFS_AG_RESV_NONE;
3878 args.datatype = ap->datatype;
3879 if (ap->datatype & XFS_ALLOC_USERDATA_ZERO)
	error = xfs_alloc_vextent(&args);
	if (error)
		return error;

	if (tryagain && args.fsbno == NULLFSBLOCK) {
3888 * Exact allocation failed. Now try with alignment
3892 args.fsbno = ap->blkno;
3893 args.alignment = stripe_align;
3894 args.minlen = nextminlen;
		args.minalignslop = 0;
		isaligned = 1;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}
3900 if (isaligned && args.fsbno == NULLFSBLOCK) {
3902 * allocation failed, so turn off alignment and
3906 args.fsbno = ap->blkno;
3908 if ((error = xfs_alloc_vextent(&args)))
3911 if (args.fsbno == NULLFSBLOCK && nullfb &&
3912 args.minlen > ap->minlen) {
3913 args.minlen = ap->minlen;
3914 args.type = XFS_ALLOCTYPE_START_BNO;
3915 args.fsbno = ap->blkno;
3916 if ((error = xfs_alloc_vextent(&args)))
3919 if (args.fsbno == NULLFSBLOCK && nullfb) {
3921 args.type = XFS_ALLOCTYPE_FIRST_AG;
3922 args.total = ap->minlen;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
		ap->dfops->dop_low = true;
	}
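	/*
	 * Retry ladder above: exact block, then aligned near the hint, then
	 * unaligned, then anywhere starting at the hint with the minimum
	 * length, and finally the first AG with the total relaxed to the
	 * minimum; only when all of these fail is fsbno left as NULLFSBLOCK.
	 */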
3927 if (args.fsbno != NULLFSBLOCK) {
3929 * check the allocation happened at the same or higher AG than
3930 * the first block that was allocated.
3932 ASSERT(*ap->firstblock == NULLFSBLOCK ||
3933 XFS_FSB_TO_AGNO(mp, *ap->firstblock) <=
3934 XFS_FSB_TO_AGNO(mp, args.fsbno));
3936 ap->blkno = args.fsbno;
3937 if (*ap->firstblock == NULLFSBLOCK)
3938 *ap->firstblock = args.fsbno;
3939 ASSERT(nullfb || fb_agno <= args.agno);
3940 ap->length = args.len;
3941 if (!(ap->flags & XFS_BMAPI_COWFORK))
3942 ap->ip->i_d.di_nblocks += args.len;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= args.len;
		/*
		 * Adjust the disk quota also. This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
					XFS_TRANS_DQ_BCOUNT,
			(long) args.len);
	} else {
		ap->blkno = NULLFSBLOCK;
		ap->length = 0;
	}
	return 0;
}
/*
 * For a remap operation, just "allocate" an extent at the address that the
 * caller passed in, and ensure that the AGFL is the right size. The caller
 * will then map the "allocated" extent into the file somewhere.
 */
STATIC int
xfs_bmap_remap_alloc(
3968 struct xfs_bmalloca *ap)
3970 struct xfs_trans *tp = ap->tp;
3971 struct xfs_mount *mp = tp->t_mountp;
3973 struct xfs_alloc_arg args;
3977 * validate that the block number is legal - the enables us to detect
3978 * and handle a silent filesystem corruption rather than crashing.
3980 memset(&args, 0, sizeof(struct xfs_alloc_arg));
3982 args.mp = ap->tp->t_mountp;
3983 bno = *ap->firstblock;
3984 args.agno = XFS_FSB_TO_AGNO(mp, bno);
3985 args.agbno = XFS_FSB_TO_AGBNO(mp, bno);
3986 if (args.agno >= mp->m_sb.sb_agcount ||
3987 args.agbno >= mp->m_sb.sb_agblocks)
3988 return -EFSCORRUPTED;
3990 /* "Allocate" the extent from the range we passed in. */
3991 trace_xfs_bmap_remap_alloc(ap->ip, *ap->firstblock, ap->length);
3993 ap->ip->i_d.di_nblocks += ap->length;
3994 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
3996 /* Fix the freelist, like a real allocator does. */
3997 args.datatype = ap->datatype;
3998 args.pag = xfs_perag_get(args.mp, args.agno);
4002 * The freelist fixing code will decline the allocation if
4003 * the size and shape of the free space doesn't allow for
4004 * allocating the extent and updating all the metadata that
4005 * happens during an allocation. We're remapping, not
4006 * allocating, so skip that check by pretending to be freeing.
4008 error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
4009 xfs_perag_put(args.pag);
4011 trace_xfs_bmap_remap_alloc_error(ap->ip, error, _RET_IP_);
/*
 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
 * It figures out where to ask the underlying allocator to put the new extent.
 */
STATIC int
xfs_bmap_alloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
4023 if (ap->flags & XFS_BMAPI_REMAP)
4024 return xfs_bmap_remap_alloc(ap);
4025 if (XFS_IS_REALTIME_INODE(ap->ip) &&
4026 xfs_alloc_is_userdata(ap->datatype))
4027 return xfs_bmap_rtalloc(ap);
4028 return xfs_bmap_btalloc(ap);
/* Trim extent to fit a logical block range. */
void
xfs_trim_extent(
	struct xfs_bmbt_irec	*irec,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len)
{
	xfs_fileoff_t		distance;
4039 xfs_fileoff_t end = bno + len;
4041 if (irec->br_startoff + irec->br_blockcount <= bno ||
4042 irec->br_startoff >= end) {
		irec->br_blockcount = 0;
		return;
	}
4047 if (irec->br_startoff < bno) {
4048 distance = bno - irec->br_startoff;
4049 if (isnullstartblock(irec->br_startblock))
4050 irec->br_startblock = DELAYSTARTBLOCK;
4051 if (irec->br_startblock != DELAYSTARTBLOCK &&
4052 irec->br_startblock != HOLESTARTBLOCK)
4053 irec->br_startblock += distance;
4054 irec->br_startoff += distance;
4055 irec->br_blockcount -= distance;
4058 if (end < irec->br_startoff + irec->br_blockcount) {
4059 distance = irec->br_startoff + irec->br_blockcount - end;
		irec->br_blockcount -= distance;
	}
}
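/*
 * Example (hypothetical numbers): trimming an extent that maps file blocks
 * 10-29 to the range bno = 15, len = 10 leaves br_startoff = 15 and
 * br_blockcount = 10, with br_startblock advanced by 5 unless it is the
 * HOLESTARTBLOCK or DELAYSTARTBLOCK sentinel.
 */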
4064 /* trim extent to within eof */
4066 xfs_trim_extent_eof(
4067 struct xfs_bmbt_irec *irec,
4068 struct xfs_inode *ip)
4071 xfs_trim_extent(irec, 0, XFS_B_TO_FSB(ip->i_mount,
4072 i_size_read(VFS_I(ip))));
4076 * Trim the returned map to the required bounds
4080 struct xfs_bmbt_irec *mval,
4081 struct xfs_bmbt_irec *got,
4089 if ((flags & XFS_BMAPI_ENTIRE) ||
4090 got->br_startoff + got->br_blockcount <= obno) {
4092 if (isnullstartblock(got->br_startblock))
4093 mval->br_startblock = DELAYSTARTBLOCK;
4099 ASSERT((*bno >= obno) || (n == 0));
4101 mval->br_startoff = *bno;
4102 if (isnullstartblock(got->br_startblock))
4103 mval->br_startblock = DELAYSTARTBLOCK;
4105 mval->br_startblock = got->br_startblock +
4106 (*bno - got->br_startoff);
4108 * Return the minimum of what we got and what we asked for for
4109 * the length. We can use the len variable here because it is
4110 * modified below and we could have been there before coming
4111 * here if the first part of the allocation didn't overlap what
4114 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
4115 got->br_blockcount - (*bno - got->br_startoff));
4116 mval->br_state = got->br_state;
4117 ASSERT(mval->br_blockcount <= len);
4122 * Update and validate the extent map to return
4125 xfs_bmapi_update_map(
4126 struct xfs_bmbt_irec **map,
4134 xfs_bmbt_irec_t *mval = *map;
4136 ASSERT((flags & XFS_BMAPI_ENTIRE) ||
4137 ((mval->br_startoff + mval->br_blockcount) <= end));
4138 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
4139 (mval->br_startoff < obno));
4141 *bno = mval->br_startoff + mval->br_blockcount;
4143 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
4144 /* update previous map with new information */
4145 ASSERT(mval->br_startblock == mval[-1].br_startblock);
4146 ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
4147 ASSERT(mval->br_state == mval[-1].br_state);
4148 mval[-1].br_blockcount = mval->br_blockcount;
4149 mval[-1].br_state = mval->br_state;
4150 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
4151 mval[-1].br_startblock != DELAYSTARTBLOCK &&
4152 mval[-1].br_startblock != HOLESTARTBLOCK &&
4153 mval->br_startblock == mval[-1].br_startblock +
4154 mval[-1].br_blockcount &&
4155 ((flags & XFS_BMAPI_IGSTATE) ||
4156 mval[-1].br_state == mval->br_state)) {
4157 ASSERT(mval->br_startoff ==
4158 mval[-1].br_startoff + mval[-1].br_blockcount);
4159 mval[-1].br_blockcount += mval->br_blockcount;
4160 } else if (*n > 0 &&
4161 mval->br_startblock == DELAYSTARTBLOCK &&
4162 mval[-1].br_startblock == DELAYSTARTBLOCK &&
4163 mval->br_startoff ==
4164 mval[-1].br_startoff + mval[-1].br_blockcount) {
4165 mval[-1].br_blockcount += mval->br_blockcount;
4166 mval[-1].br_state = mval->br_state;
4167 } else if (!((*n == 0) &&
4168 ((mval->br_startoff + mval->br_blockcount) <=
/*
 * Map file blocks to filesystem blocks without allocation.
 */
int
xfs_bmapi_read(
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	struct xfs_bmbt_irec	*mval,
	int			*nmap,
	int			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
4189 struct xfs_ifork *ifp;
4190 struct xfs_bmbt_irec got;
4191 struct xfs_bmbt_irec prev;
4198 int whichfork = xfs_bmapi_whichfork(flags);
4201 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
4202 XFS_BMAPI_IGSTATE|XFS_BMAPI_COWFORK)));
4203 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
4205 if (unlikely(XFS_TEST_ERROR(
4206 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4207 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
4208 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
4209 XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp);
4210 return -EFSCORRUPTED;
4213 if (XFS_FORCED_SHUTDOWN(mp))
4216 XFS_STATS_INC(mp, xs_blk_mapr);
4218 ifp = XFS_IFORK_PTR(ip, whichfork);
4220 /* No CoW fork? Return a hole. */
4221 if (whichfork == XFS_COW_FORK) {
4222 mval->br_startoff = bno;
4223 mval->br_startblock = HOLESTARTBLOCK;
4224 mval->br_blockcount = len;
4225 mval->br_state = XFS_EXT_NORM;
4231 * A missing attr ifork implies that the inode says we're in
4232 * extents or btree format but failed to pass the inode fork
4233 * verifier while trying to load it. Treat that as a file corruption.
4237 xfs_alert(mp, "%s: inode %llu missing fork %d",
4238 __func__, ip->i_ino, whichfork);
4240 return -EFSCORRUPTED;
4243 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4244 error = xfs_iread_extents(NULL, ip, whichfork);
4249 xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, &prev);
4253 while (bno < end && n < *nmap) {
4254 /* Reading past eof, act as though there's a hole up to end. */
4256 got.br_startoff = end;
4257 if (got.br_startoff > bno) {
4258 /* Reading in a hole. */
4259 mval->br_startoff = bno;
4260 mval->br_startblock = HOLESTARTBLOCK;
4261 mval->br_blockcount =
4262 XFS_FILBLKS_MIN(len, got.br_startoff - bno);
4263 mval->br_state = XFS_EXT_NORM;
4264 bno += mval->br_blockcount;
4265 len -= mval->br_blockcount;
4271 /* set up the extent map to return. */
4272 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
4273 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4275 /* If we're done, stop now. */
4276 if (bno >= end || n >= *nmap)
4279 /* Else go on to the next record. */
4280 if (++lastx < xfs_iext_count(ifp))
4281 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx), &got);
4290 * Add a delayed allocation extent to an inode. Blocks are reserved from the
4291 * global pool and the extent inserted into the inode in-core extent tree.
4293 * On entry, got refers to the first extent beyond the offset of the extent to
4294 * allocate or eof is specified if no such extent exists. On return, got refers
4295 * to the extent record that was inserted to the inode fork.
4297 * Note that the allocated extent may have been merged with contiguous extents
4298 * during insertion into the inode fork. Thus, got does not reflect the current
4299 * state of the inode fork on return. If necessary, the caller can use lastx to
4300 * look up the updated record in the inode fork.
4303 xfs_bmapi_reserve_delalloc(
4304 struct xfs_inode *ip,
4308 xfs_filblks_t prealloc,
4309 struct xfs_bmbt_irec *got,
4310 xfs_extnum_t *lastx,
4313 struct xfs_mount *mp = ip->i_mount;
4314 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
4316 xfs_extlen_t indlen;
4317 char rt = XFS_IS_REALTIME_INODE(ip);
4320 xfs_fileoff_t aoff = off;
4323 * Cap the alloc length. Keep track of prealloc so we know whether to
4324 * tag the inode before we return.
4326 alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN);
4328 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
4329 if (prealloc && alen >= len)
4330 prealloc = alen - len;
4332 /* Figure out the extent size, adjust alen */
4333 if (whichfork == XFS_COW_FORK)
4334 extsz = xfs_get_cowextsz_hint(ip);
4336 extsz = xfs_get_extsz_hint(ip);
4338 struct xfs_bmbt_irec prev;
4340 if (!xfs_iext_get_extent(ifp, *lastx - 1, &prev))
4341 prev.br_startoff = NULLFILEOFF;
4343 error = xfs_bmap_extsize_align(mp, got, &prev, extsz, rt, eof,
4344 1, 0, &aoff, &alen);
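		/*
		 * Illustrative example: with a 16-block extent size hint, a
		 * request for [off 5, 4 blocks] is typically widened here to
		 * the aligned range [off 0, 16 blocks], subject to EOF and to
		 * the neighbouring extents.
		 */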
4349 extsz = alen / mp->m_sb.sb_rextsize;
4352 * Make a transaction-less quota reservation for delayed allocation
4353 * blocks. This number gets adjusted later. We return if we haven't
4354 * allocated blocks already inside this loop.
4356 error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
4357 rt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4362 * Split changing sb for alen and indlen since they could be coming
4363 * from different places.
4365 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
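	/*
	 * indlen is the worst-case number of indirect (bmbt) blocks that may
	 * be needed to map this range once it is physically allocated; both
	 * alen and indlen are reserved from fdblocks below so that later
	 * writeback cannot fail for lack of space.
	 */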
4369 error = xfs_mod_frextents(mp, -((int64_t)extsz));
4371 error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
4375 goto out_unreserve_quota;
4377 error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
4379 goto out_unreserve_blocks;
4382 ip->i_delayed_blks += alen;
4384 got->br_startoff = aoff;
4385 got->br_startblock = nullstartblock(indlen);
4386 got->br_blockcount = alen;
4387 got->br_state = XFS_EXT_NORM;
4389 xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got);
4392 * Tag the inode if blocks were preallocated. Note that COW fork
4393 * preallocation can occur at the start or end of the extent, even when
4394 * prealloc == 0, so we must also check the aligned offset and length.
4396 if (whichfork == XFS_DATA_FORK && prealloc)
4397 xfs_inode_set_eofblocks_tag(ip);
4398 if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
4399 xfs_inode_set_cowblocks_tag(ip);
4403 out_unreserve_blocks:
4405 xfs_mod_frextents(mp, extsz);
4407 xfs_mod_fdblocks(mp, alen, false);
4408 out_unreserve_quota:
4409 if (XFS_IS_QUOTA_ON(mp))
4410 xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ?
4411 XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4417 struct xfs_bmalloca *bma)
4419 struct xfs_mount *mp = bma->ip->i_mount;
4420 int whichfork = xfs_bmapi_whichfork(bma->flags);
4421 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4422 int tmp_logflags = 0;
4425 ASSERT(bma->length > 0);
4428 * For the wasdelay case, we could also just allocate the stuff asked
4429 * for in this bmap call but that wouldn't be as good.
4432 bma->length = (xfs_extlen_t)bma->got.br_blockcount;
4433 bma->offset = bma->got.br_startoff;
4434 if (bma->idx != NULLEXTNUM && bma->idx) {
4435 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1),
4439 bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
4441 bma->length = XFS_FILBLKS_MIN(bma->length,
4442 bma->got.br_startoff - bma->offset);
4446 * Set the data type being allocated. For the data fork, the first data
4447 * in the file is treated differently to all other allocations. For the
4448 * attribute fork, we only need to ensure the allocated range is not on the busy list.
4451 if (!(bma->flags & XFS_BMAPI_METADATA)) {
4452 bma->datatype = XFS_ALLOC_NOBUSY;
4453 if (whichfork == XFS_DATA_FORK) {
4454 if (bma->offset == 0)
4455 bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
4457 bma->datatype |= XFS_ALLOC_USERDATA;
4459 if (bma->flags & XFS_BMAPI_ZERO)
4460 bma->datatype |= XFS_ALLOC_USERDATA_ZERO;
4463 bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1;
4466 * Only want to do the alignment at the eof if it is userdata and
4467 * allocation length is larger than a stripe unit.
4469 if (mp->m_dalign && bma->length >= mp->m_dalign &&
4470 !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) {
4471 error = xfs_bmap_isaeof(bma, whichfork);
4476 error = xfs_bmap_alloc(bma);
4481 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4482 if (bma->blkno == NULLFSBLOCK)
4484 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4485 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
4486 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4487 bma->cur->bc_private.b.dfops = bma->dfops;
4490 * Bump the number of extents we've allocated in this call.
4496 bma->cur->bc_private.b.flags =
4497 bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
4499 bma->got.br_startoff = bma->offset;
4500 bma->got.br_startblock = bma->blkno;
4501 bma->got.br_blockcount = bma->length;
4502 bma->got.br_state = XFS_EXT_NORM;
4505 * In the data fork, a wasdelay extent has been initialized, so
4506 * shouldn't be flagged as unwritten.
4508 * For the cow fork, however, we convert delalloc reservations
4509 * (extents allocated for speculative preallocation) to
4510 * allocated unwritten extents, and only convert the unwritten
4511 * extents to real extents when we're about to write the data.
4513 if ((!bma->wasdel || (bma->flags & XFS_BMAPI_COWFORK)) &&
4514 (bma->flags & XFS_BMAPI_PREALLOC) &&
4515 xfs_sb_version_hasextflgbit(&mp->m_sb))
4516 bma->got.br_state = XFS_EXT_UNWRITTEN;
4519 error = xfs_bmap_add_extent_delay_real(bma, whichfork);
4521 error = xfs_bmap_add_extent_hole_real(bma, whichfork);
4523 bma->logflags |= tmp_logflags;
4528 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
4529 * or xfs_bmap_add_extent_hole_real might have merged it into one of
4530 * the neighbouring ones.
4532 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);
4534 ASSERT(bma->got.br_startoff <= bma->offset);
4535 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
4536 bma->offset + bma->length);
4537 ASSERT(bma->got.br_state == XFS_EXT_NORM ||
4538 bma->got.br_state == XFS_EXT_UNWRITTEN);
4543 xfs_bmapi_convert_unwritten(
4544 struct xfs_bmalloca *bma,
4545 struct xfs_bmbt_irec *mval,
4549 int whichfork = xfs_bmapi_whichfork(flags);
4550 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4551 int tmp_logflags = 0;
4554 /* check if we need to do unwritten->real conversion */
4555 if (mval->br_state == XFS_EXT_UNWRITTEN &&
4556 (flags & XFS_BMAPI_PREALLOC))
4559 /* check if we need to do real->unwritten conversion */
4560 if (mval->br_state == XFS_EXT_NORM &&
4561 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
4562 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
4566 * Modify (by adding) the state flag, if writing.
4568 ASSERT(mval->br_blockcount <= len);
4569 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4570 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
4571 bma->ip, whichfork);
4572 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4573 bma->cur->bc_private.b.dfops = bma->dfops;
4575 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
4576 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
4579 * Before insertion into the bmbt, zero the range being converted if required.
4582 if (flags & XFS_BMAPI_ZERO) {
4583 error = xfs_zero_extent(bma->ip, mval->br_startblock,
4584 mval->br_blockcount);
4589 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
4590 &bma->idx, &bma->cur, mval, bma->firstblock, bma->dfops,
4593 * Log the inode core unconditionally in the unwritten extent conversion
4594 * path because the conversion might not have done so (e.g., if the
4595 * extent count hasn't changed). We need to make sure the inode is dirty
4596 * in the transaction for the sake of fsync(), even if nothing has
4597 * changed, because fsync() will not force the log for this transaction
4598 * unless it sees the inode pinned.
4600 * Note: If we're only converting cow fork extents, there aren't
4601 * any on-disk updates to make, so we don't need to log anything.
4603 if (whichfork != XFS_COW_FORK)
4604 bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
4609 * Update our extent pointer, given that
4610 * xfs_bmap_add_extent_unwritten_real might have merged it into one
4611 * of the neighbouring ones.
4613 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);
4616 * We may have combined previously unwritten space with written space,
4617 * so generate another request.
4619 if (mval->br_blockcount < len)
4625 * Map file blocks to filesystem blocks, and allocate blocks or convert the
4626 * extent state if necessary. Detailed behaviour is controlled by the flags
4627 * parameter. Only allocates blocks from a single allocation group, to avoid locking problems.
4630 * The returned value in "firstblock" from the first call in a transaction
4631 * must be remembered and presented to subsequent calls in "firstblock".
4632 * An upper bound for the number of blocks to be allocated is supplied to
4633 * the first call in "total"; if no allocation group has that many free
4634 * blocks then the call will fail (return NULLFSBLOCK in "firstblock").
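 *
 * A rough usage sketch (illustrative only; offset_fsb, count_fsb and resblks
 * are placeholders, and the transaction/dfops setup is assumed to have been
 * done elsewhere):
 *
 *	xfs_fsblock_t		firstfsb = NULLFSBLOCK;
 *	struct xfs_bmbt_irec	map;
 *	int			nmap = 1;
 *	int			error;
 *
 *	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
 *			XFS_BMAPI_PREALLOC, &firstfsb, resblks, &map,
 *			&nmap, &dfops);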
4638 struct xfs_trans *tp, /* transaction pointer */
4639 struct xfs_inode *ip, /* incore inode */
4640 xfs_fileoff_t bno, /* starting file offs. mapped */
4641 xfs_filblks_t len, /* length to map in file */
4642 int flags, /* XFS_BMAPI_... */
4643 xfs_fsblock_t *firstblock, /* first allocated block
4644 controls a.g. for allocs */
4645 xfs_extlen_t total, /* total blocks needed */
4646 struct xfs_bmbt_irec *mval, /* output: map values */
4647 int *nmap, /* i/o: mval size/count */
4648 struct xfs_defer_ops *dfops) /* i/o: list extents to free */
4650 struct xfs_mount *mp = ip->i_mount;
4651 struct xfs_ifork *ifp;
4652 struct xfs_bmalloca bma = { NULL }; /* args for xfs_bmap_alloc */
4653 xfs_fileoff_t end; /* end of mapped file region */
4654 int eof; /* after the end of extents */
4655 int error; /* error return */
4656 int n; /* current extent index */
4657 xfs_fileoff_t obno; /* old block number (offset) */
4658 int whichfork; /* data or attr fork */
4661 xfs_fileoff_t orig_bno; /* original block number value */
4662 int orig_flags; /* original flags arg value */
4663 xfs_filblks_t orig_len; /* original value of len arg */
4664 struct xfs_bmbt_irec *orig_mval; /* original value of mval */
4665 int orig_nmap; /* original value of *nmap */
4673 whichfork = xfs_bmapi_whichfork(flags);
4676 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4677 ASSERT(!(flags & XFS_BMAPI_IGSTATE));
4678 ASSERT(tp != NULL ||
4679 (flags & (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK)) ==
4680 (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK));
4682 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
4683 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4684 ASSERT(!(flags & XFS_BMAPI_REMAP) || whichfork == XFS_DATA_FORK);
4685 ASSERT(!(flags & XFS_BMAPI_PREALLOC) || !(flags & XFS_BMAPI_REMAP));
4686 ASSERT(!(flags & XFS_BMAPI_CONVERT) || !(flags & XFS_BMAPI_REMAP));
4688 /* zeroing is currently only for data extents, not metadata */
4689 ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
4690 (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
4692 * we can allocate unwritten extents or pre-zero allocated blocks,
4693 * but it makes no sense to do both at once. This would result in
4694 * zeroing the unwritten extent twice, but it still being an
4695 * unwritten extent....
4697 ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
4698 (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
4700 if (unlikely(XFS_TEST_ERROR(
4701 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4702 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
4703 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
4704 XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
4705 return -EFSCORRUPTED;
4708 if (XFS_FORCED_SHUTDOWN(mp))
4711 ifp = XFS_IFORK_PTR(ip, whichfork);
4713 XFS_STATS_INC(mp, xs_blk_mapw);
4715 if (*firstblock == NULLFSBLOCK) {
4716 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
4717 bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
4724 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4725 error = xfs_iread_extents(tp, ip, whichfork);
4730 xfs_bmap_search_extents(ip, bno, whichfork, &eof, &bma.idx, &bma.got,
4741 bma.firstblock = firstblock;
4743 while (bno < end && n < *nmap) {
4744 bool need_alloc = false, wasdelay = false;
4746 /* in hole or beyond EOF? */
4747 if (eof || bma.got.br_startoff > bno) {
4748 if (flags & XFS_BMAPI_DELALLOC) {
4750 * For the COW fork we can reasonably get a
4751 * request for converting an extent that races
4752 * with other threads that have already converted
4753 * part of it, since converting COW to regular
4754 * blocks there is not protected by the IOLOCK.
4757 ASSERT(flags & XFS_BMAPI_COWFORK);
4758 if (!(flags & XFS_BMAPI_COWFORK)) {
4763 if (eof || bno >= end)
4770 * Make sure we only reflink into a hole.
4772 ASSERT(!(flags & XFS_BMAPI_REMAP));
4773 if (isnullstartblock(bma.got.br_startblock))
4778 * First, deal with the hole before the allocated space
4779 * that we found, if any.
4781 if (need_alloc || wasdelay) {
4783 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4784 bma.wasdel = wasdelay;
4789 * There's a 32/64 bit type mismatch between the
4790 * allocation length request (which can be 64 bits in
4791 * length) and the bma length request, which is
4792 * xfs_extlen_t and therefore 32 bits. Hence we have to
4793 * check for 32-bit overflows and handle them here.
4795 if (len > (xfs_filblks_t)MAXEXTLEN)
4796 bma.length = MAXEXTLEN;
4801 ASSERT(bma.length > 0);
4802 error = xfs_bmapi_allocate(&bma);
4805 if (bma.blkno == NULLFSBLOCK)
4809 * If this is a CoW allocation, record the data in
4810 * the refcount btree for orphan recovery.
4812 if (whichfork == XFS_COW_FORK) {
4813 error = xfs_refcount_alloc_cow_extent(mp, dfops,
4814 bma.blkno, bma.length);
4820 /* Deal with the allocated space we found. */
4821 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
4824 /* Execute unwritten extent conversion if necessary */
4825 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
4826 if (error == -EAGAIN)
4831 /* update the extent map to return */
4832 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4835 * If we're done, stop now. Stop when we've allocated
4836 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise
4837 * the transaction may get too big.
4839 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
4842 /* Else go on to the next record. */
4844 if (++bma.idx < xfs_iext_count(ifp)) {
4845 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma.idx),
4853 * Transform from btree to extents, give it cur.
4855 if (xfs_bmap_wants_extents(ip, whichfork)) {
4856 int tmp_logflags = 0;
4859 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur,
4860 &tmp_logflags, whichfork);
4861 bma.logflags |= tmp_logflags;
4866 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
4867 XFS_IFORK_NEXTENTS(ip, whichfork) >
4868 XFS_IFORK_MAXEXT(ip, whichfork));
4872 * Log everything. Do this after conversion, there's no point in
4873 * logging the extent records if we've converted to btree format.
4875 if ((bma.logflags & xfs_ilog_fext(whichfork)) &&
4876 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
4877 bma.logflags &= ~xfs_ilog_fext(whichfork);
4878 else if ((bma.logflags & xfs_ilog_fbroot(whichfork)) &&
4879 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
4880 bma.logflags &= ~xfs_ilog_fbroot(whichfork);
4882 * Log whatever the flags say, even if error. Otherwise we might miss
4883 * detecting a case where the data is changed, there's an error,
4884 * and it's not logged so we don't shutdown when we should.
4887 xfs_trans_log_inode(tp, ip, bma.logflags);
4891 ASSERT(*firstblock == NULLFSBLOCK ||
4892 XFS_FSB_TO_AGNO(mp, *firstblock) <=
4894 bma.cur->bc_private.b.firstblock));
4895 *firstblock = bma.cur->bc_private.b.firstblock;
4897 xfs_btree_del_cursor(bma.cur,
4898 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
4901 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
4907 * When a delalloc extent is split (e.g., due to a hole punch), the original
4908 * indlen reservation must be shared across the two new extents that are left behind.
4911 * Given the original reservation and the worst case indlen for the two new
4912 * extents (as calculated by xfs_bmap_worst_indlen()), split the original
4913 * reservation fairly across the two new extents. If necessary, steal available
4914 * blocks from a deleted extent to make up a reservation deficiency (e.g., if
4915 * ores == 1). The number of stolen blocks is returned. The availability and
4916 * subsequent accounting of stolen blocks is the responsibility of the caller.
4918 static xfs_filblks_t
4919 xfs_bmap_split_indlen(
4920 xfs_filblks_t ores, /* original res. */
4921 xfs_filblks_t *indlen1, /* ext1 worst indlen */
4922 xfs_filblks_t *indlen2, /* ext2 worst indlen */
4923 xfs_filblks_t avail) /* stealable blocks */
4925 xfs_filblks_t len1 = *indlen1;
4926 xfs_filblks_t len2 = *indlen2;
4927 xfs_filblks_t nres = len1 + len2; /* new total res. */
4928 xfs_filblks_t stolen = 0;
4929 xfs_filblks_t resfactor;
4932 * Steal as many blocks as we can to try and satisfy the worst case
4933 * indlen for both new extents.
4935 if (ores < nres && avail)
4936 stolen = XFS_FILBLKS_MIN(nres - ores, avail);
4939 /* nothing else to do if we've satisfied the new reservation */
4944 * We can't meet the total required reservation for the two extents.
4945 * Calculate the percent of the overall shortage between both extents
4946 * and apply this percentage to each of the requested indlen values.
4947 * This distributes the shortage fairly and reduces the chances that one
4948 * of the two extents is left with nothing when extents are repeatedly split.
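	/*
	 * Illustrative example: with ores = 10, *indlen1 = 8 and *indlen2 = 4,
	 * nres is 12 and resfactor works out to 83; scaling each request by
	 * 83/100 gives len1 = 6 and len2 = 3, and the one remaining block of
	 * the original reservation is handed out by the loop further below.
	 */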
4951 resfactor = (ores * 100);
4952 do_div(resfactor, nres);
4957 ASSERT(len1 + len2 <= ores);
4958 ASSERT(len1 < *indlen1 && len2 < *indlen2);
4961 * Hand out the remainder to each extent. If one of the two reservations
4962 * is zero, we want to make sure that one gets a block first. The loop
4963 * below starts with len1, so hand len2 a block right off the bat if it is zero.
4966 ores -= (len1 + len2);
4967 ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
4968 if (ores && !len2 && *indlen2) {
4973 if (len1 < *indlen1) {
4979 if (len2 < *indlen2) {
4992 xfs_bmap_del_extent_delay(
4993 struct xfs_inode *ip,
4996 struct xfs_bmbt_irec *got,
4997 struct xfs_bmbt_irec *del)
4999 struct xfs_mount *mp = ip->i_mount;
5000 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
5001 struct xfs_bmbt_irec new;
5002 int64_t da_old, da_new, da_diff = 0;
5003 xfs_fileoff_t del_endoff, got_endoff;
5004 xfs_filblks_t got_indlen, new_indlen, stolen;
5005 int error = 0, state = 0;
5008 XFS_STATS_INC(mp, xs_del_exlist);
5010 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
5011 del_endoff = del->br_startoff + del->br_blockcount;
5012 got_endoff = got->br_startoff + got->br_blockcount;
5013 da_old = startblockval(got->br_startblock);
5017 ASSERT(*idx <= xfs_iext_count(ifp));
5018 ASSERT(del->br_blockcount > 0);
5019 ASSERT(got->br_startoff <= del->br_startoff);
5020 ASSERT(got_endoff >= del_endoff);
5023 int64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount);
5025 do_div(rtexts, mp->m_sb.sb_rextsize);
5026 xfs_mod_frextents(mp, rtexts);
5030 * Update the inode delalloc counter now and wait to update the
5031 * sb counters as we might have to borrow some blocks for the
5032 * indirect block accounting.
5034 error = xfs_trans_reserve_quota_nblks(NULL, ip,
5035 -((long)del->br_blockcount), 0,
5036 isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
5039 ip->i_delayed_blks -= del->br_blockcount;
5041 if (whichfork == XFS_COW_FORK)
5042 state |= BMAP_COWFORK;
5044 if (got->br_startoff == del->br_startoff)
5045 state |= BMAP_LEFT_CONTIG;
5046 if (got_endoff == del_endoff)
5047 state |= BMAP_RIGHT_CONTIG;
5049 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
5050 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
5052 * Matches the whole extent. Delete the entry.
5054 xfs_iext_remove(ip, *idx, 1, state);
5057 case BMAP_LEFT_CONTIG:
5059 * Deleting the first part of the extent.
5061 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5062 got->br_startoff = del_endoff;
5063 got->br_blockcount -= del->br_blockcount;
5064 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
5065 got->br_blockcount), da_old);
5066 got->br_startblock = nullstartblock((int)da_new);
5067 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
5068 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5070 case BMAP_RIGHT_CONTIG:
5072 * Deleting the last part of the extent.
5074 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5075 got->br_blockcount = got->br_blockcount - del->br_blockcount;
5076 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
5077 got->br_blockcount), da_old);
5078 got->br_startblock = nullstartblock((int)da_new);
5079 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
5080 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5084 * Deleting the middle of the extent.
5086 * Distribute the original indlen reservation across the two new
5087 * extents. Steal blocks from the deleted extent if necessary.
5088 * Stealing blocks simply fudges the fdblocks accounting below.
5089 * Warn if either of the new indlen reservations is zero as this
5090 * can lead to delalloc problems.
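	 *
	 * For example (illustrative values), punching del = [40, 20 blocks]
	 * out of a delalloc extent got = [0, 100 blocks] leaves got as
	 * [0, 40 blocks], inserts new = [60, 40 blocks], and re-splits the
	 * old indlen reservation between the two via xfs_bmap_split_indlen().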
5092 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5094 got->br_blockcount = del->br_startoff - got->br_startoff;
5095 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
5097 new.br_blockcount = got_endoff - del_endoff;
5098 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
5100 WARN_ON_ONCE(!got_indlen || !new_indlen);
5101 stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
5102 del->br_blockcount);
5104 got->br_startblock = nullstartblock((int)got_indlen);
5105 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
5106 trace_xfs_bmap_post_update(ip, *idx, 0, _THIS_IP_);
5108 new.br_startoff = del_endoff;
5109 new.br_state = got->br_state;
5110 new.br_startblock = nullstartblock((int)new_indlen);
5113 xfs_iext_insert(ip, *idx, 1, &new, state);
5115 da_new = got_indlen + new_indlen - stolen;
5116 del->br_blockcount -= stolen;
5120 ASSERT(da_old >= da_new);
5121 da_diff = da_old - da_new;
5123 da_diff += del->br_blockcount;
5125 xfs_mod_fdblocks(mp, da_diff, false);
5130 xfs_bmap_del_extent_cow(
5131 struct xfs_inode *ip,
5133 struct xfs_bmbt_irec *got,
5134 struct xfs_bmbt_irec *del)
5136 struct xfs_mount *mp = ip->i_mount;
5137 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
5138 struct xfs_bmbt_irec new;
5139 xfs_fileoff_t del_endoff, got_endoff;
5140 int state = BMAP_COWFORK;
5142 XFS_STATS_INC(mp, xs_del_exlist);
5144 del_endoff = del->br_startoff + del->br_blockcount;
5145 got_endoff = got->br_startoff + got->br_blockcount;
5148 ASSERT(*idx <= xfs_iext_count(ifp));
5149 ASSERT(del->br_blockcount > 0);
5150 ASSERT(got->br_startoff <= del->br_startoff);
5151 ASSERT(got_endoff >= del_endoff);
5152 ASSERT(!isnullstartblock(got->br_startblock));
5154 if (got->br_startoff == del->br_startoff)
5155 state |= BMAP_LEFT_CONTIG;
5156 if (got_endoff == del_endoff)
5157 state |= BMAP_RIGHT_CONTIG;
5159 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
5160 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
5162 * Matches the whole extent. Delete the entry.
5164 xfs_iext_remove(ip, *idx, 1, state);
5167 case BMAP_LEFT_CONTIG:
5169 * Deleting the first part of the extent.
5171 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5172 got->br_startoff = del_endoff;
5173 got->br_blockcount -= del->br_blockcount;
5174 got->br_startblock = del->br_startblock + del->br_blockcount;
5175 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
5176 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5178 case BMAP_RIGHT_CONTIG:
5180 * Deleting the last part of the extent.
5182 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5183 got->br_blockcount -= del->br_blockcount;
5184 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
5185 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5189 * Deleting the middle of the extent.
5191 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5192 got->br_blockcount = del->br_startoff - got->br_startoff;
5193 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
5194 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5196 new.br_startoff = del_endoff;
5197 new.br_blockcount = got_endoff - del_endoff;
5198 new.br_state = got->br_state;
5199 new.br_startblock = del->br_startblock + del->br_blockcount;
5202 xfs_iext_insert(ip, *idx, 1, &new, state);
5208 * Called by xfs_bmapi to update file extent records and the btree
5209 * after removing space (or undoing a delayed allocation).
5211 STATIC int /* error */
5212 xfs_bmap_del_extent(
5213 xfs_inode_t *ip, /* incore inode pointer */
5214 xfs_trans_t *tp, /* current transaction pointer */
5215 xfs_extnum_t *idx, /* extent number to update/delete */
5216 struct xfs_defer_ops *dfops, /* list of extents to be freed */
5217 xfs_btree_cur_t *cur, /* if null, not a btree */
5218 xfs_bmbt_irec_t *del, /* data to remove from extents */
5219 int *logflagsp, /* inode logging flags */
5220 int whichfork, /* data or attr fork */
5221 int bflags) /* bmapi flags */
5223 xfs_filblks_t da_new; /* new delay-alloc indirect blocks */
5224 xfs_filblks_t da_old; /* old delay-alloc indirect blocks */
5225 xfs_fsblock_t del_endblock=0; /* first block past del */
5226 xfs_fileoff_t del_endoff; /* first offset past del */
5227 int delay; /* current block is delayed allocated */
5228 int do_fx; /* free extent at end of routine */
5229 xfs_bmbt_rec_host_t *ep; /* current extent entry pointer */
5230 int error; /* error return value */
5231 int flags; /* inode logging flags */
5232 xfs_bmbt_irec_t got; /* current extent entry */
5233 xfs_fileoff_t got_endoff; /* first offset past got */
5234 int i; /* temp state */
5235 xfs_ifork_t *ifp; /* inode fork pointer */
5236 xfs_mount_t *mp; /* mount structure */
5237 xfs_filblks_t nblks; /* quota/sb block count */
5238 xfs_bmbt_irec_t new; /* new record to be inserted */
5240 uint qfield; /* quota field to update */
5241 xfs_filblks_t temp; /* for indirect length calculations */
5242 xfs_filblks_t temp2; /* for indirect length calculations */
5246 XFS_STATS_INC(mp, xs_del_exlist);
5248 if (whichfork == XFS_ATTR_FORK)
5249 state |= BMAP_ATTRFORK;
5250 else if (whichfork == XFS_COW_FORK)
5251 state |= BMAP_COWFORK;
5253 ifp = XFS_IFORK_PTR(ip, whichfork);
5254 ASSERT((*idx >= 0) && (*idx < xfs_iext_count(ifp)));
5255 ASSERT(del->br_blockcount > 0);
5256 ep = xfs_iext_get_ext(ifp, *idx);
5257 xfs_bmbt_get_all(ep, &got);
5258 ASSERT(got.br_startoff <= del->br_startoff);
5259 del_endoff = del->br_startoff + del->br_blockcount;
5260 got_endoff = got.br_startoff + got.br_blockcount;
5261 ASSERT(got_endoff >= del_endoff);
5262 delay = isnullstartblock(got.br_startblock);
5263 ASSERT(isnullstartblock(del->br_startblock) == delay);
5268 * If deleting a real allocation, must free up the disk space.
5271 flags = XFS_ILOG_CORE;
5273 * Realtime allocation. Free it and record di_nblocks update.
5275 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
5279 ASSERT(do_mod(del->br_blockcount,
5280 mp->m_sb.sb_rextsize) == 0);
5281 ASSERT(do_mod(del->br_startblock,
5282 mp->m_sb.sb_rextsize) == 0);
5283 bno = del->br_startblock;
5284 len = del->br_blockcount;
5285 do_div(bno, mp->m_sb.sb_rextsize);
5286 do_div(len, mp->m_sb.sb_rextsize);
5287 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
5291 nblks = len * mp->m_sb.sb_rextsize;
5292 qfield = XFS_TRANS_DQ_RTBCOUNT;
5295 * Ordinary allocation.
5299 nblks = del->br_blockcount;
5300 qfield = XFS_TRANS_DQ_BCOUNT;
5303 * Set up del_endblock and cur for later.
5305 del_endblock = del->br_startblock + del->br_blockcount;
5307 if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
5308 got.br_startblock, got.br_blockcount,
5311 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5313 da_old = da_new = 0;
5315 da_old = startblockval(got.br_startblock);
5322 * Set flag value to use in switch statement.
5323 * Left-contig is 2, right-contig is 1.
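	 * For example, deleting exactly the whole of got sets both bits and
	 * takes case 3; trimming only the front of got takes case 2, only the
	 * back takes case 1, and carving a piece out of the middle takes
	 * case 0, which has to insert a new record.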
5325 switch (((got.br_startoff == del->br_startoff) << 1) |
5326 (got_endoff == del_endoff)) {
5329 * Matches the whole extent. Delete the entry.
5331 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5332 xfs_iext_remove(ip, *idx, 1,
5333 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
5338 XFS_IFORK_NEXT_SET(ip, whichfork,
5339 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
5340 flags |= XFS_ILOG_CORE;
5342 flags |= xfs_ilog_fext(whichfork);
5345 if ((error = xfs_btree_delete(cur, &i)))
5347 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5352 * Deleting the first part of the extent.
5354 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5355 xfs_bmbt_set_startoff(ep, del_endoff);
5356 temp = got.br_blockcount - del->br_blockcount;
5357 xfs_bmbt_set_blockcount(ep, temp);
5359 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
5361 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
5362 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5366 xfs_bmbt_set_startblock(ep, del_endblock);
5367 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5369 flags |= xfs_ilog_fext(whichfork);
5372 if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock,
5373 got.br_blockcount - del->br_blockcount,
5380 * Deleting the last part of the extent.
5382 temp = got.br_blockcount - del->br_blockcount;
5383 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5384 xfs_bmbt_set_blockcount(ep, temp);
5386 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
5388 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
5389 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5393 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5395 flags |= xfs_ilog_fext(whichfork);
5398 if ((error = xfs_bmbt_update(cur, got.br_startoff,
5400 got.br_blockcount - del->br_blockcount,
5407 * Deleting the middle of the extent.
5409 temp = del->br_startoff - got.br_startoff;
5410 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5411 xfs_bmbt_set_blockcount(ep, temp);
5412 new.br_startoff = del_endoff;
5413 temp2 = got_endoff - del_endoff;
5414 new.br_blockcount = temp2;
5415 new.br_state = got.br_state;
5417 new.br_startblock = del_endblock;
5418 flags |= XFS_ILOG_CORE;
5420 if ((error = xfs_bmbt_update(cur,
5422 got.br_startblock, temp,
5425 if ((error = xfs_btree_increment(cur, 0, &i)))
5427 cur->bc_rec.b = new;
5428 error = xfs_btree_insert(cur, &i);
5429 if (error && error != -ENOSPC)
5432 * If we get no-space back from the btree insert,
5433 * it tried a split, and we have a zero
5434 * block reservation.
5435 * Fix up our state and return the error.
5437 if (error == -ENOSPC) {
5439 * Reset the cursor, don't trust
5440 * it after any insert operation.
5442 if ((error = xfs_bmbt_lookup_eq(cur,
5447 XFS_WANT_CORRUPTED_GOTO(mp,
5450 * Update the btree record back
5451 * to the original value.
5453 if ((error = xfs_bmbt_update(cur,
5460 * Reset the extent record back
5461 * to the original value.
5463 xfs_bmbt_set_blockcount(ep,
5469 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5471 flags |= xfs_ilog_fext(whichfork);
5472 XFS_IFORK_NEXT_SET(ip, whichfork,
5473 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
5475 xfs_filblks_t stolen;
5476 ASSERT(whichfork == XFS_DATA_FORK);
5479 * Distribute the original indlen reservation across the
5480 * two new extents. Steal blocks from the deleted extent
5481 * if necessary. Stealing blocks simply fudges the
5482 * fdblocks accounting in xfs_bunmapi().
5484 temp = xfs_bmap_worst_indlen(ip, got.br_blockcount);
5485 temp2 = xfs_bmap_worst_indlen(ip, new.br_blockcount);
5486 stolen = xfs_bmap_split_indlen(da_old, &temp, &temp2,
5487 del->br_blockcount);
5488 da_new = temp + temp2 - stolen;
5489 del->br_blockcount -= stolen;
5492 * Set the reservation for each extent. Warn if either
5493 * is zero as this can lead to delalloc problems.
5495 WARN_ON_ONCE(!temp || !temp2);
5496 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
5497 new.br_startblock = nullstartblock((int)temp2);
5499 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5500 xfs_iext_insert(ip, *idx + 1, 1, &new, state);
5505 /* remove reverse mapping */
5507 error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, del);
5513 * If we need to, add to list of extents to delete.
5515 if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
5516 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
5517 error = xfs_refcount_decrease_extent(mp, dfops, del);
5521 xfs_bmap_add_free(mp, dfops, del->br_startblock,
5522 del->br_blockcount, NULL);
5526 * Adjust inode # blocks in the file.
5529 ip->i_d.di_nblocks -= nblks;
5531 * Adjust quota data.
5533 if (qfield && !(bflags & XFS_BMAPI_REMAP))
5534 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
5537 * Account for change in delayed indirect blocks.
5538 * Nothing to do for disk quota accounting here.
5540 ASSERT(da_old >= da_new);
5541 if (da_old > da_new)
5542 xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false);
5549 * Unmap (remove) blocks from a file.
5550 * If nexts is nonzero then the number of extents to remove is limited to
5551 * that value. If not all extents in the block range can be removed then the length still to be unmapped is returned in *rlen.
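 * For example (illustrative), a file with extents covering [0, 50) and
 * [50, 100) unmapped with bno = 0, len = 100 and nexts = 1 removes only the
 * highest-offset extent, [50, 100), and returns *rlen = 50 so the caller can
 * come back in for the rest.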
5556 xfs_trans_t *tp, /* transaction pointer */
5557 struct xfs_inode *ip, /* incore inode */
5558 xfs_fileoff_t bno, /* starting offset to unmap */
5559 xfs_filblks_t *rlen, /* i/o: amount remaining */
5560 int flags, /* misc flags */
5561 xfs_extnum_t nexts, /* number of extents max */
5562 xfs_fsblock_t *firstblock, /* first allocated block
5563 controls a.g. for allocs */
5564 struct xfs_defer_ops *dfops) /* i/o: deferred updates */
5566 xfs_btree_cur_t *cur; /* bmap btree cursor */
5567 xfs_bmbt_irec_t del; /* extent being deleted */
5568 int eof; /* is deleting at eof */
5569 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
5570 int error; /* error return value */
5571 xfs_extnum_t extno; /* extent number in list */
5572 xfs_bmbt_irec_t got; /* current extent record */
5573 xfs_ifork_t *ifp; /* inode fork pointer */
5574 int isrt; /* freeing in rt area */
5575 xfs_extnum_t lastx; /* last extent index used */
5576 int logflags; /* transaction logging flags */
5577 xfs_extlen_t mod; /* rt extent offset */
5578 xfs_mount_t *mp; /* mount structure */
5579 xfs_bmbt_irec_t prev; /* previous extent record */
5580 xfs_fileoff_t start; /* first file offset deleted */
5581 int tmp_logflags; /* partial logging flags */
5582 int wasdel; /* was a delayed alloc extent */
5583 int whichfork; /* data or attribute fork */
5585 xfs_filblks_t len = *rlen; /* length to unmap in file */
5586 xfs_fileoff_t max_len;
5587 xfs_agnumber_t prev_agno = NULLAGNUMBER, agno;
5589 trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);
5591 whichfork = xfs_bmapi_whichfork(flags);
5592 ASSERT(whichfork != XFS_COW_FORK);
5593 ifp = XFS_IFORK_PTR(ip, whichfork);
5595 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5596 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
5597 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
5599 return -EFSCORRUPTED;
5602 if (XFS_FORCED_SHUTDOWN(mp))
5605 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5610 * Guesstimate how many blocks we can unmap without running the risk of
5611 * blowing out the transaction with a mix of EFIs and reflink adjustments.
5614 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK)
5615 max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res));
5619 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5620 (error = xfs_iread_extents(tp, ip, whichfork)))
5622 if (xfs_iext_count(ifp) == 0) {
5626 XFS_STATS_INC(mp, xs_blk_unmap);
5627 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
5629 bno = start + len - 1;
5630 ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
5634 * Check to see if the given block number is past the end of the
5635 * file, back up to the last block if so...
5638 ep = xfs_iext_get_ext(ifp, --lastx);
5639 xfs_bmbt_get_all(ep, &got);
5640 bno = got.br_startoff + got.br_blockcount - 1;
5643 if (ifp->if_flags & XFS_IFBROOT) {
5644 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
5645 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5646 cur->bc_private.b.firstblock = *firstblock;
5647 cur->bc_private.b.dfops = dfops;
5648 cur->bc_private.b.flags = 0;
5654 * Synchronize by locking the bitmap inode.
5656 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
5657 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
5658 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
5659 xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
5663 while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 &&
5664 (nexts == 0 || extno < nexts) && max_len > 0) {
5666 * Is the found extent after a hole in which bno lives?
5667 * Just back up to the previous extent, if so.
5669 if (got.br_startoff > bno) {
5672 ep = xfs_iext_get_ext(ifp, lastx);
5673 xfs_bmbt_get_all(ep, &got);
5676 * Is the last block of this extent before the range
5677 * we're supposed to delete? If so, we're done.
5679 bno = XFS_FILEOFF_MIN(bno,
5680 got.br_startoff + got.br_blockcount - 1);
5684 * Then deal with the (possibly delayed) allocated space we found.
5689 wasdel = isnullstartblock(del.br_startblock);
5692 * Make sure we don't touch multiple AGF headers out of order
5693 * in a single transaction, as that could cause AB-BA deadlocks.
5695 if (!wasdel && !isrt) {
5696 agno = XFS_FSB_TO_AGNO(mp, del.br_startblock);
5697 if (prev_agno != NULLAGNUMBER && prev_agno > agno)
5701 if (got.br_startoff < start) {
5702 del.br_startoff = start;
5703 del.br_blockcount -= start - got.br_startoff;
5705 del.br_startblock += start - got.br_startoff;
5707 if (del.br_startoff + del.br_blockcount > bno + 1)
5708 del.br_blockcount = bno + 1 - del.br_startoff;
5710 /* How much can we safely unmap? */
5711 if (max_len < del.br_blockcount) {
5712 del.br_startoff += del.br_blockcount - max_len;
5714 del.br_startblock += del.br_blockcount - max_len;
5715 del.br_blockcount = max_len;
5718 sum = del.br_startblock + del.br_blockcount;
5720 (mod = do_mod(sum, mp->m_sb.sb_rextsize))) {
5722 * Realtime extent not lined up at the end.
5723 * The extent could have been split into written
5724 * and unwritten pieces, or we could just be
5725 * unmapping part of it. But we can't really
5726 * get rid of part of a realtime extent.
5728 if (del.br_state == XFS_EXT_UNWRITTEN ||
5729 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5731 * This piece is unwritten, or we're not
5732 * using unwritten extents. Skip over it.
5735 bno -= mod > del.br_blockcount ?
5736 del.br_blockcount : mod;
5737 if (bno < got.br_startoff) {
5739 xfs_bmbt_get_all(xfs_iext_get_ext(
5745 * It's written, turn it unwritten.
5746 * This is better than zeroing it.
5748 ASSERT(del.br_state == XFS_EXT_NORM);
5749 ASSERT(tp->t_blk_res > 0);
5751 * If this spans a realtime extent boundary,
5752 * chop it back to the start of the one we end at.
5754 if (del.br_blockcount > mod) {
5755 del.br_startoff += del.br_blockcount - mod;
5756 del.br_startblock += del.br_blockcount - mod;
5757 del.br_blockcount = mod;
5759 del.br_state = XFS_EXT_UNWRITTEN;
5760 error = xfs_bmap_add_extent_unwritten_real(tp, ip,
5761 whichfork, &lastx, &cur, &del,
5762 firstblock, dfops, &logflags);
5767 if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) {
5769 * Realtime extent is lined up at the end but not
5770 * at the front. We'll get rid of full extents if we can.
5773 mod = mp->m_sb.sb_rextsize - mod;
5774 if (del.br_blockcount > mod) {
5775 del.br_blockcount -= mod;
5776 del.br_startoff += mod;
5777 del.br_startblock += mod;
5778 } else if ((del.br_startoff == start &&
5779 (del.br_state == XFS_EXT_UNWRITTEN ||
5780 tp->t_blk_res == 0)) ||
5781 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5783 * Can't make it unwritten. There isn't
5784 * a full extent here so just skip it.
5786 ASSERT(bno >= del.br_blockcount);
5787 bno -= del.br_blockcount;
5788 if (got.br_startoff > bno) {
5790 ep = xfs_iext_get_ext(ifp,
5792 xfs_bmbt_get_all(ep, &got);
5796 } else if (del.br_state == XFS_EXT_UNWRITTEN) {
5798 * This one is already unwritten.
5799 * It must have a written left neighbor.
5800 * Unwrite the killed part of that one and try again.
5804 xfs_bmbt_get_all(xfs_iext_get_ext(ifp,
5806 ASSERT(prev.br_state == XFS_EXT_NORM);
5807 ASSERT(!isnullstartblock(prev.br_startblock));
5808 ASSERT(del.br_startblock ==
5809 prev.br_startblock + prev.br_blockcount);
5810 if (prev.br_startoff < start) {
5811 mod = start - prev.br_startoff;
5812 prev.br_blockcount -= mod;
5813 prev.br_startblock += mod;
5814 prev.br_startoff = start;
5816 prev.br_state = XFS_EXT_UNWRITTEN;
5818 error = xfs_bmap_add_extent_unwritten_real(tp,
5819 ip, whichfork, &lastx, &cur,
5820 &prev, firstblock, dfops,
5826 ASSERT(del.br_state == XFS_EXT_NORM);
5827 del.br_state = XFS_EXT_UNWRITTEN;
5828 error = xfs_bmap_add_extent_unwritten_real(tp,
5829 ip, whichfork, &lastx, &cur,
5830 &del, firstblock, dfops,
5839 * If it's the case where the directory code is running
5840 * with no block reservation, and the deleted block is in
5841 * the middle of its extent, and the resulting insert
5842 * of an extent would cause transformation to btree format,
5843 * then reject it. The calling code will then swap
5844 * blocks around instead.
5845 * We have to do this now, rather than waiting for the
5846 * conversion to btree format, since the transaction will be dirty.
5849 if (!wasdel && tp->t_blk_res == 0 &&
5850 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
5851 XFS_IFORK_NEXTENTS(ip, whichfork) >= /* Note the >= */
5852 XFS_IFORK_MAXEXT(ip, whichfork) &&
5853 del.br_startoff > got.br_startoff &&
5854 del.br_startoff + del.br_blockcount <
5855 got.br_startoff + got.br_blockcount) {
5861 * Unreserve quota and update realtime free space, if
5862 * appropriate. If delayed allocation, update the inode delalloc
5863 * counter now and wait to update the sb counters as
5864 * xfs_bmap_del_extent() might need to borrow some blocks.
5867 ASSERT(startblockval(del.br_startblock) > 0);
5869 xfs_filblks_t rtexts;
5871 rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
5872 do_div(rtexts, mp->m_sb.sb_rextsize);
5873 xfs_mod_frextents(mp, (int64_t)rtexts);
5874 (void)xfs_trans_reserve_quota_nblks(NULL,
5875 ip, -((long)del.br_blockcount), 0,
5876 XFS_QMOPT_RES_RTBLKS);
5878 (void)xfs_trans_reserve_quota_nblks(NULL,
5879 ip, -((long)del.br_blockcount), 0,
5880 XFS_QMOPT_RES_REGBLKS);
5882 ip->i_delayed_blks -= del.br_blockcount;
5884 cur->bc_private.b.flags |=
5885 XFS_BTCUR_BPRV_WASDEL;
5887 cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL;
5889 error = xfs_bmap_del_extent(ip, tp, &lastx, dfops, cur, &del,
5890 &tmp_logflags, whichfork, flags);
5891 logflags |= tmp_logflags;
5895 if (!isrt && wasdel)
5896 xfs_mod_fdblocks(mp, (int64_t)del.br_blockcount, false);
5898 max_len -= del.br_blockcount;
5899 bno = del.br_startoff - 1;
5902 * If not done go on to the next (previous) record.
5904 if (bno != (xfs_fileoff_t)-1 && bno >= start) {
5906 ep = xfs_iext_get_ext(ifp, lastx);
5907 if (xfs_bmbt_get_startoff(ep) > bno) {
5909 ep = xfs_iext_get_ext(ifp,
5912 xfs_bmbt_get_all(ep, &got);
5917 if (bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0)
5920 *rlen = bno - start + 1;
5923 * Convert to a btree if necessary.
5925 if (xfs_bmap_needs_btree(ip, whichfork)) {
5926 ASSERT(cur == NULL);
5927 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops,
5928 &cur, 0, &tmp_logflags, whichfork);
5929 logflags |= tmp_logflags;
5934 * transform from btree to extents, give it cur
5936 else if (xfs_bmap_wants_extents(ip, whichfork)) {
5937 ASSERT(cur != NULL);
5938 error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
5940 logflags |= tmp_logflags;
5945 * transform from extents to local?
5950 * Log everything. Do this after conversion, there's no point in
5951 * logging the extent records if we've converted to btree format.
5953 if ((logflags & xfs_ilog_fext(whichfork)) &&
5954 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5955 logflags &= ~xfs_ilog_fext(whichfork);
5956 else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5957 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5958 logflags &= ~xfs_ilog_fbroot(whichfork);
5960 * Log inode even in the error case, if the transaction
5961 * is dirty we'll need to shut down the filesystem.
5964 xfs_trans_log_inode(tp, ip, logflags);
5967 *firstblock = cur->bc_private.b.firstblock;
5968 cur->bc_private.b.allocated = 0;
5970 xfs_btree_del_cursor(cur,
5971 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5976 /* Unmap a range of a file. */
5980 struct xfs_inode *ip,
5985 xfs_fsblock_t *firstblock,
5986 struct xfs_defer_ops *dfops,
5991 error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts, firstblock,
5998 * Determine whether an extent shift can be accomplished by a merge with the
5999 * extent that precedes the target hole of the shift.
6003 struct xfs_bmbt_irec *left, /* preceding extent */
6004 struct xfs_bmbt_irec *got, /* current extent to shift */
6005 xfs_fileoff_t shift) /* shift fsb */
6007 xfs_fileoff_t startoff;
6009 startoff = got->br_startoff - shift;
6012 * The extent, once shifted, must be adjacent in-file and on-disk with
6013 * the preceding extent.
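 *
 * For example (illustrative), left = [off 0, 10 blocks @ block 100] and
 * got = [off 15, 5 blocks @ block 110] can be merged for a shift of 5:
 * the shifted start offset (10) and start block (110) both line up with
 * the end of left, the extent states match, and the combined length is
 * well under MAXEXTLEN.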
6015 if ((left->br_startoff + left->br_blockcount != startoff) ||
6016 (left->br_startblock + left->br_blockcount != got->br_startblock) ||
6017 (left->br_state != got->br_state) ||
6018 (left->br_blockcount + got->br_blockcount > MAXEXTLEN))
6025 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
6026 * hole in the file. If an extent shift would result in the extent being fully
6027 * adjacent to the extent that currently precedes the hole, we can merge with
6028 * the preceding extent rather than do the shift.
6030 * This function assumes the caller has verified a shift-by-merge is possible
6031 * with the provided extents via xfs_bmse_can_merge().
6035 struct xfs_inode *ip,
6037 xfs_fileoff_t shift, /* shift fsb */
6038 int current_ext, /* idx of gotp */
6039 struct xfs_bmbt_rec_host *gotp, /* extent to shift */
6040 struct xfs_bmbt_rec_host *leftp, /* preceding extent */
6041 struct xfs_btree_cur *cur,
6042 int *logflags) /* output */
6044 struct xfs_bmbt_irec got;
6045 struct xfs_bmbt_irec left;
6046 xfs_filblks_t blockcount;
6048 struct xfs_mount *mp = ip->i_mount;
6050 xfs_bmbt_get_all(gotp, &got);
6051 xfs_bmbt_get_all(leftp, &left);
6052 blockcount = left.br_blockcount + got.br_blockcount;
6054 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
6055 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
6056 ASSERT(xfs_bmse_can_merge(&left, &got, shift));
6059 * Merge the in-core extents. Note that the host record pointers and
6060 * current_ext index are invalid once the extent has been removed via
6061 * xfs_iext_remove().
6063 xfs_bmbt_set_blockcount(leftp, blockcount);
6064 xfs_iext_remove(ip, current_ext, 1, 0);
6067 * Update the on-disk extent count, the btree if necessary and log the inode.
6070 XFS_IFORK_NEXT_SET(ip, whichfork,
6071 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
6072 *logflags |= XFS_ILOG_CORE;
6074 *logflags |= XFS_ILOG_DEXT;
6078 /* lookup and remove the extent to merge */
6079 error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock,
6080 got.br_blockcount, &i);
6083 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
6085 error = xfs_btree_delete(cur, &i);
6088 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
6090 /* lookup and update size of the previous extent */
6091 error = xfs_bmbt_lookup_eq(cur, left.br_startoff, left.br_startblock,
6092 left.br_blockcount, &i);
6095 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
6097 left.br_blockcount = blockcount;
6099 return xfs_bmbt_update(cur, left.br_startoff, left.br_startblock,
6100 left.br_blockcount, left.br_state);
6104 * Shift a single extent.
6108 struct xfs_inode *ip,
6110 xfs_fileoff_t offset_shift_fsb,
6112 struct xfs_bmbt_rec_host *gotp,
6113 struct xfs_btree_cur *cur,
6115 enum shift_direction direction,
6116 struct xfs_defer_ops *dfops)
6118 struct xfs_ifork *ifp;
6119 struct xfs_mount *mp;
6120 xfs_fileoff_t startoff;
6121 struct xfs_bmbt_rec_host *adj_irecp;
6122 struct xfs_bmbt_irec got;
6123 struct xfs_bmbt_irec adj_irec;
6129 ifp = XFS_IFORK_PTR(ip, whichfork);
6130 total_extents = xfs_iext_count(ifp);
6132 xfs_bmbt_get_all(gotp, &got);
6134 /* delalloc extents should be prevented by caller */
6135 XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got.br_startblock));
6137 if (direction == SHIFT_LEFT) {
6138 startoff = got.br_startoff - offset_shift_fsb;
6141 * Check for merge if we've got an extent to the left,
6142 * otherwise make sure there's enough room at the start
6143 * of the file for the shift.
6145 if (!*current_ext) {
6146 if (got.br_startoff < offset_shift_fsb)
6148 goto update_current_ext;
6151 * grab the left extent and check for a large enough hole.
6154 adj_irecp = xfs_iext_get_ext(ifp, *current_ext - 1);
6155 xfs_bmbt_get_all(adj_irecp, &adj_irec);
6158 adj_irec.br_startoff + adj_irec.br_blockcount)
6161 /* check whether to merge the extent or shift it down */
6162 if (xfs_bmse_can_merge(&adj_irec, &got,
6163 offset_shift_fsb)) {
6164 error = xfs_bmse_merge(ip, whichfork, offset_shift_fsb,
6165 *current_ext, gotp, adj_irecp,
6173 startoff = got.br_startoff + offset_shift_fsb;
6174 /* nothing to move if this is the last extent */
6175 if (*current_ext >= (total_extents - 1))
6176 goto update_current_ext;
6178 * If this is not the last extent in the file, make sure there
6179 * is enough room between current extent and next extent for
6180 * accommodating the shift.
6182 adj_irecp = xfs_iext_get_ext(ifp, *current_ext + 1);
6183 xfs_bmbt_get_all(adj_irecp, &adj_irec);
6184 if (startoff + got.br_blockcount > adj_irec.br_startoff)
6187 * Unlike a left shift (which involves a hole punch),
6188 * a right shift does not modify extent neighbors
6189 * in any way. We should never find mergeable extents
6190 * in this scenario. Check anyway and warn if we
6191 * encounter two extents that could be one.
6193 if (xfs_bmse_can_merge(&got, &adj_irec, offset_shift_fsb))
6197 * Increment the extent index for the next iteration, update the start
6198 * offset of the in-core extent and update the btree if applicable.
6201 if (direction == SHIFT_LEFT)
6205 xfs_bmbt_set_startoff(gotp, startoff);
6206 *logflags |= XFS_ILOG_CORE;
6209 *logflags |= XFS_ILOG_DEXT;
6213 error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock,
6214 got.br_blockcount, &i);
6217 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
6219 got.br_startoff = startoff;
6220 error = xfs_bmbt_update(cur, got.br_startoff, got.br_startblock,
6221 got.br_blockcount, got.br_state);
6226 /* update reverse mapping */
6227 error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, &adj_irec);
6230 adj_irec.br_startoff = startoff;
6231 return xfs_rmap_map_extent(mp, dfops, ip, whichfork, &adj_irec);
6235 * Shift extent records to the left/right to cover/create a hole.
6237 * The maximum number of extents to be shifted in a single operation is
6238 * @num_exts. @stop_fsb specifies the file offset at which to stop the shift, and the
6239 * file offset where we've left off is returned in @next_fsb. @offset_shift_fsb
6240 * is the length by which each extent is shifted. If there is no hole to shift
6241 * the extents into, this will be considered an invalid operation and we abort immediately.
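 *
 * For example (illustrative), collapsing out a 16-block hole punched at
 * file offset 100: the caller passes offset_shift_fsb = 16 and a *next_fsb
 * just past the hole, and each subsequent extent has its br_startoff
 * reduced by 16 (merging with the preceding extent where possible) until
 * num_exts extents have been shifted; the resume point comes back in
 * *next_fsb.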
6245 xfs_bmap_shift_extents(
6246 struct xfs_trans *tp,
6247 struct xfs_inode *ip,
6248 xfs_fileoff_t *next_fsb,
6249 xfs_fileoff_t offset_shift_fsb,
6251 xfs_fileoff_t stop_fsb,
6252 xfs_fsblock_t *firstblock,
6253 struct xfs_defer_ops *dfops,
6254 enum shift_direction direction,
6257 struct xfs_btree_cur *cur = NULL;
6258 struct xfs_bmbt_rec_host *gotp;
6259 struct xfs_bmbt_irec got;
6260 struct xfs_mount *mp = ip->i_mount;
6261 struct xfs_ifork *ifp;
6262 xfs_extnum_t nexts = 0;
6263 xfs_extnum_t current_ext;
6264 xfs_extnum_t total_extents;
6265 xfs_extnum_t stop_extent;
6267 int whichfork = XFS_DATA_FORK;
6270 if (unlikely(XFS_TEST_ERROR(
6271 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
6272 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
6273 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
6274 XFS_ERROR_REPORT("xfs_bmap_shift_extents",
6275 XFS_ERRLEVEL_LOW, mp);
6276 return -EFSCORRUPTED;
6279 if (XFS_FORCED_SHUTDOWN(mp))
6282 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
6283 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
6284 ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
6285 ASSERT(*next_fsb != NULLFSBLOCK || direction == SHIFT_RIGHT);
6287 ifp = XFS_IFORK_PTR(ip, whichfork);
6288 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
6289 /* Read in all the extents */
6290 error = xfs_iread_extents(tp, ip, whichfork);
6295 if (ifp->if_flags & XFS_IFBROOT) {
6296 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
6297 cur->bc_private.b.firstblock = *firstblock;
6298 cur->bc_private.b.dfops = dfops;
6299 cur->bc_private.b.flags = 0;
6303 * There may be delalloc extents in the data fork before the range we
6304 * are collapsing out, so we cannot use the count of real extents here.
6305 * Instead we have to calculate it from the incore fork.
6307 total_extents = xfs_iext_count(ifp);
6308 if (total_extents == 0) {
6314 * In case of first right shift, we need to initialize next_fsb
6316 if (*next_fsb == NULLFSBLOCK) {
6317 gotp = xfs_iext_get_ext(ifp, total_extents - 1);
6318 xfs_bmbt_get_all(gotp, &got);
6319 *next_fsb = got.br_startoff;
6320 if (stop_fsb > *next_fsb) {
6326 /* Lookup the extent index at which we have to stop */
6327 if (direction == SHIFT_RIGHT) {
6328 gotp = xfs_iext_bno_to_ext(ifp, stop_fsb, &stop_extent);
6329 /* Make stop_extent exclusive of shift range */
6332 stop_extent = total_extents;
6335 * Look up the extent index for the fsb where we start shifting. We can
6336 * henceforth iterate with current_ext as extent list changes are locked
6339 * gotp can be null in 2 cases: 1) if there are no extents or 2)
6340 * *next_fsb lies in a hole beyond which there are no extents. Either
6343 gotp = xfs_iext_bno_to_ext(ifp, *next_fsb, &current_ext);
6349 /* some sanity checking before we finally start shifting extents */
6350 if ((direction == SHIFT_LEFT && current_ext >= stop_extent) ||
6351 (direction == SHIFT_RIGHT && current_ext <= stop_extent)) {
6356 while (nexts++ < num_exts) {
6357 error = xfs_bmse_shift_one(ip, whichfork, offset_shift_fsb,
6358 &current_ext, gotp, cur, &logflags,
6363 * If there was an extent merge during the shift, the extent
6364 * count can change. Update the total and grab the next record.
6366 if (direction == SHIFT_LEFT) {
6367 total_extents = xfs_iext_count(ifp);
6368 stop_extent = total_extents;
6371 if (current_ext == stop_extent) {
6373 *next_fsb = NULLFSBLOCK;
6376 gotp = xfs_iext_get_ext(ifp, current_ext);
6380 xfs_bmbt_get_all(gotp, &got);
6381 *next_fsb = got.br_startoff;
6386 xfs_btree_del_cursor(cur,
6387 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
6390 xfs_trans_log_inode(tp, ip, logflags);
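/*
 * Illustrative sketch only (not the in-tree caller, which lives in
 * xfs_bmap_util.c): this hypothetical helper shows how a collapse/insert
 * range operation is expected to drive xfs_bmap_shift_extents() -- one
 * transaction per batch of shifted extents, looping until the shift
 * reports completion through *done.  The helper name and the batch size
 * of one extent per transaction are assumptions made for the example;
 * the caller must already hold the IOLOCK exclusively.
 */
STATIC int
xfs_example_shift_range(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	xfs_fileoff_t		stop_fsb,
	enum shift_direction	direction)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		firstblock;
	int			done = 0;
	int			error;

	while (!done) {
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
		if (error)
			return error;

		/* The ILOCK is taken per transaction and released at commit. */
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
		xfs_defer_init(&dfops, &firstblock);

		/* Shift up to one extent per transaction (batch size is arbitrary). */
		error = xfs_bmap_shift_extents(tp, ip, next_fsb,
				offset_shift_fsb, &done, stop_fsb,
				&firstblock, &dfops, direction, 1);
		if (error)
			goto out_cancel;

		/* Finish deferred ops (rmap updates etc.) before committing. */
		error = xfs_defer_finish(&tp, &dfops, NULL);
		if (error)
			goto out_cancel;

		error = xfs_trans_commit(tp);
		if (error)
			return error;
	}
	return 0;

out_cancel:
	xfs_defer_cancel(&dfops);
	xfs_trans_cancel(tp);
	return error;
}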
6396 * Splits an extent into two extents at split_fsb such that split_fsb becomes
6397 * the first block of the new (second) extent. @split_fsb is the file offset
6398 * block at which the extent is split. If split_fsb lies in a hole or on the
6399 * first block of an existing extent, there is nothing to split and we return 0.
6402 xfs_bmap_split_extent_at(
6403 struct xfs_trans *tp,
6404 struct xfs_inode *ip,
6405 xfs_fileoff_t split_fsb,
6406 xfs_fsblock_t *firstfsb,
6407 struct xfs_defer_ops *dfops)
6409 int whichfork = XFS_DATA_FORK;
6410 struct xfs_btree_cur *cur = NULL;
6411 struct xfs_bmbt_rec_host *gotp;
6412 struct xfs_bmbt_irec got;
6413 struct xfs_bmbt_irec new; /* split extent */
6414 struct xfs_mount *mp = ip->i_mount;
6415 struct xfs_ifork *ifp;
6416 xfs_fsblock_t gotblkcnt; /* new block count for got */
6417 xfs_extnum_t current_ext;
6422 if (unlikely(XFS_TEST_ERROR(
6423 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
6424 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
6425 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
6426 XFS_ERROR_REPORT("xfs_bmap_split_extent_at",
6427 XFS_ERRLEVEL_LOW, mp);
6428 return -EFSCORRUPTED;
6431 if (XFS_FORCED_SHUTDOWN(mp))
6434 ifp = XFS_IFORK_PTR(ip, whichfork);
6435 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
6436 /* Read in all the extents */
6437 error = xfs_iread_extents(tp, ip, whichfork);
6443 * gotp can be null in 2 cases: 1) if there are no extents
6444 * or 2) split_fsb lies in a hole beyond which there are
6445 * no extents. Either way, we are done.
6447 gotp = xfs_iext_bno_to_ext(ifp, split_fsb, &current_ext);
6451 xfs_bmbt_get_all(gotp, &got);
6454 * Check if split_fsb lies in a hole or at the start boundary offset
6457 if (got.br_startoff >= split_fsb)
6460 gotblkcnt = split_fsb - got.br_startoff;
6461 new.br_startoff = split_fsb;
6462 new.br_startblock = got.br_startblock + gotblkcnt;
6463 new.br_blockcount = got.br_blockcount - gotblkcnt;
6464 new.br_state = got.br_state;
6466 if (ifp->if_flags & XFS_IFBROOT) {
6467 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
6468 cur->bc_private.b.firstblock = *firstfsb;
6469 cur->bc_private.b.dfops = dfops;
6470 cur->bc_private.b.flags = 0;
6471 error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
6477 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
6480 xfs_bmbt_set_blockcount(gotp, gotblkcnt);
6481 got.br_blockcount = gotblkcnt;
6483 logflags = XFS_ILOG_CORE;
6485 error = xfs_bmbt_update(cur, got.br_startoff,
6492 logflags |= XFS_ILOG_DEXT;
6494 /* Add new extent */
6496 xfs_iext_insert(ip, current_ext, 1, &new, 0);
6497 XFS_IFORK_NEXT_SET(ip, whichfork,
6498 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
6501 error = xfs_bmbt_lookup_eq(cur, new.br_startoff,
6502 new.br_startblock, new.br_blockcount,
6506 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor);
6507 cur->bc_rec.b.br_state = new.br_state;
6509 error = xfs_btree_insert(cur, &i);
6512 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
6516 * Convert to a btree if necessary.
6518 if (xfs_bmap_needs_btree(ip, whichfork)) {
6519 int tmp_logflags; /* partial log flag return val */
6521 ASSERT(cur == NULL);
6522 error = xfs_bmap_extents_to_btree(tp, ip, firstfsb, dfops,
6523 &cur, 0, &tmp_logflags, whichfork);
6524 logflags |= tmp_logflags;
6529 cur->bc_private.b.allocated = 0;
6530 xfs_btree_del_cursor(cur,
6531 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
6535 xfs_trans_log_inode(tp, ip, logflags);
6540 xfs_bmap_split_extent(
6541 struct xfs_inode *ip,
6542 xfs_fileoff_t split_fsb)
6544 struct xfs_mount *mp = ip->i_mount;
6545 struct xfs_trans *tp;
6546 struct xfs_defer_ops dfops;
6547 xfs_fsblock_t firstfsb;
6550 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
6551 XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
6555 xfs_ilock(ip, XFS_ILOCK_EXCL);
6556 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
6558 xfs_defer_init(&dfops, &firstfsb);
6560 error = xfs_bmap_split_extent_at(tp, ip, split_fsb,
6565 error = xfs_defer_finish(&tp, &dfops, NULL);
6569 return xfs_trans_commit(tp);
6572 xfs_defer_cancel(&dfops);
6573 xfs_trans_cancel(tp);
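/*
 * Illustrative sketch only: an insert-range style operation would first use
 * xfs_bmap_split_extent() so that no extent straddles the offset being opened
 * up, and then shift everything right, starting from the last extent
 * (*next_fsb == NULLFSBLOCK) down to that offset.  The helper below is
 * hypothetical and reuses the xfs_example_shift_range() sketch above.
 */
STATIC int
xfs_example_insert_range(
	struct xfs_inode	*ip,
	xfs_fileoff_t		stop_fsb,	/* offset of the new hole */
	xfs_fileoff_t		shift_fsb)	/* size of the new hole */
{
	xfs_fileoff_t		next_fsb = NULLFSBLOCK;
	int			error;

	/* Split any extent that crosses the insertion boundary. */
	error = xfs_bmap_split_extent(ip, stop_fsb);
	if (error)
		return error;

	/* Shift right from EOF down to stop_fsb, one batch per transaction. */
	return xfs_example_shift_range(ip, &next_fsb, shift_fsb, stop_fsb,
			SHIFT_RIGHT);
}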
6577 /* Deferred mapping is only for real extents in the data fork. */
6579 xfs_bmap_is_update_needed(
6580 struct xfs_bmbt_irec *bmap)
6582 return bmap->br_startblock != HOLESTARTBLOCK &&
6583 bmap->br_startblock != DELAYSTARTBLOCK;
6586 /* Record a bmap intent. */
6589 struct xfs_mount *mp,
6590 struct xfs_defer_ops *dfops,
6591 enum xfs_bmap_intent_type type,
6592 struct xfs_inode *ip,
6594 struct xfs_bmbt_irec *bmap)
6597 struct xfs_bmap_intent *bi;
6599 trace_xfs_bmap_defer(mp,
6600 XFS_FSB_TO_AGNO(mp, bmap->br_startblock),
6602 XFS_FSB_TO_AGBNO(mp, bmap->br_startblock),
6603 ip->i_ino, whichfork,
6605 bmap->br_blockcount,
6608 bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_SLEEP | KM_NOFS);
6609 INIT_LIST_HEAD(&bi->bi_list);
6612 bi->bi_whichfork = whichfork;
6613 bi->bi_bmap = *bmap;
6615 error = xfs_defer_join(dfops, bi->bi_owner);
6621 xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
6625 /* Map an extent into a file. */
6627 xfs_bmap_map_extent(
6628 struct xfs_mount *mp,
6629 struct xfs_defer_ops *dfops,
6630 struct xfs_inode *ip,
6631 struct xfs_bmbt_irec *PREV)
6633 if (!xfs_bmap_is_update_needed(PREV))
6636 return __xfs_bmap_add(mp, dfops, XFS_BMAP_MAP, ip,
6637 XFS_DATA_FORK, PREV);
6640 /* Unmap an extent out of a file. */
6642 xfs_bmap_unmap_extent(
6643 struct xfs_mount *mp,
6644 struct xfs_defer_ops *dfops,
6645 struct xfs_inode *ip,
6646 struct xfs_bmbt_irec *PREV)
6648 if (!xfs_bmap_is_update_needed(PREV))
6651 return __xfs_bmap_add(mp, dfops, XFS_BMAP_UNMAP, ip,
6652 XFS_DATA_FORK, PREV);
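/*
 * Illustrative sketch only: a caller that wants to move a mapping from one
 * file to another within a single transaction (e.g. a reflink style remap)
 * can log a deferred unmap intent against the source inode and a map intent
 * against the destination inode; the deferred ops machinery later replays
 * both through xfs_bmap_finish_one().  The helper name is hypothetical and
 * it assumes both inodes are already joined to the transaction by the caller.
 */
STATIC int
xfs_example_swap_mapping(
	struct xfs_mount	*mp,
	struct xfs_defer_ops	*dfops,
	struct xfs_inode	*src_ip,
	struct xfs_inode	*dst_ip,
	struct xfs_bmbt_irec	*irec)
{
	int			error;

	/* Remove the mapping from the source file... */
	error = xfs_bmap_unmap_extent(mp, dfops, src_ip, irec);
	if (error)
		return error;

	/* ...and add the identical mapping to the destination file. */
	return xfs_bmap_map_extent(mp, dfops, dst_ip, irec);
}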
6656 * Process one of the deferred bmap operations. We pass back the
6657 * btree cursor to maintain our lock on the bmapbt between calls.
6660 xfs_bmap_finish_one(
6661 struct xfs_trans *tp,
6662 struct xfs_defer_ops *dfops,
6663 struct xfs_inode *ip,
6664 enum xfs_bmap_intent_type type,
6666 xfs_fileoff_t startoff,
6667 xfs_fsblock_t startblock,
6668 xfs_filblks_t *blockcount,
6671 struct xfs_bmbt_irec bmap;
6673 xfs_fsblock_t firstfsb;
6674 int flags = XFS_BMAPI_REMAP;
6677 bmap.br_startblock = startblock;
6678 bmap.br_startoff = startoff;
6679 bmap.br_blockcount = *blockcount;
6680 bmap.br_state = state;
6683 * firstfsb is tied to the transaction lifetime and is used to
6684 * ensure correct AG locking order and schedule work item
6685 * continuations. XFS_BUI_MAX_FAST_EXTENTS (== 1) restricts us
6686 * to only making one bmap call per transaction, so it should
6687 * be safe to have it as a local variable here.
6689 firstfsb = NULLFSBLOCK;
6691 trace_xfs_bmap_deferred(tp->t_mountp,
6692 XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
6693 XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
6694 ip->i_ino, whichfork, startoff, *blockcount, state);
6696 if (whichfork != XFS_DATA_FORK && whichfork != XFS_ATTR_FORK)
6697 return -EFSCORRUPTED;
6698 if (whichfork == XFS_ATTR_FORK)
6699 flags |= XFS_BMAPI_ATTRFORK;
6701 if (XFS_TEST_ERROR(false, tp->t_mountp,
6702 XFS_ERRTAG_BMAP_FINISH_ONE,
6703 XFS_RANDOM_BMAP_FINISH_ONE))
6708 firstfsb = bmap.br_startblock;
6709 error = xfs_bmapi_write(tp, ip, bmap.br_startoff,
6710 bmap.br_blockcount, flags, &firstfsb,
6711 bmap.br_blockcount, &bmap, &nimaps,
6715 case XFS_BMAP_UNMAP:
6716 error = __xfs_bunmapi(tp, ip, startoff, blockcount,
6717 XFS_BMAPI_REMAP, 1, &firstfsb, dfops);
6721 error = -EFSCORRUPTED;