// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2010 David Chinner.
 * Copyright (c) 2011 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_log.h"
#include "xfs_ag.h"

static void
xfs_extent_busy_insert_list(
	struct xfs_perag	*pag,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	unsigned int		flags,
	struct list_head	*busy_list)
{
	struct xfs_extent_busy	*new;
	struct xfs_extent_busy	*busyp;
	struct rb_node		**rbp;
	struct rb_node		*parent = NULL;

	new = kzalloc(sizeof(struct xfs_extent_busy),
			GFP_KERNEL | __GFP_NOFAIL);
	new->agno = pag->pag_agno;
	new->bno = bno;
	new->length = len;
	INIT_LIST_HEAD(&new->list);
	new->flags = flags;

	/* trace before insert to be able to see failed inserts */
	trace_xfs_extent_busy(pag->pag_mount, pag->pag_agno, bno, len);

	spin_lock(&pag->pagb_lock);
	rbp = &pag->pagb_tree.rb_node;
	while (*rbp) {
		parent = *rbp;
		busyp = rb_entry(parent, struct xfs_extent_busy, rb_node);

		if (new->bno < busyp->bno) {
			rbp = &(*rbp)->rb_left;
			ASSERT(new->bno + new->length <= busyp->bno);
		} else if (new->bno > busyp->bno) {
			rbp = &(*rbp)->rb_right;
			ASSERT(bno >= busyp->bno + busyp->length);
		} else {
			/* busy extents must never overlap in the tree */
			ASSERT(0);
		}
	}

	rb_link_node(&new->rb_node, parent, rbp);
	rb_insert_color(&new->rb_node, &pag->pagb_tree);

	/* always process discard lists in fifo order */
	list_add_tail(&new->list, busy_list);
	spin_unlock(&pag->pagb_lock);
}

void
xfs_extent_busy_insert(
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	unsigned int		flags)
{
	xfs_extent_busy_insert_list(pag, bno, len, flags, &tp->t_busy);
}

void
xfs_extent_busy_insert_discard(
	struct xfs_perag	*pag,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	struct list_head	*busy_list)
{
	xfs_extent_busy_insert_list(pag, bno, len, XFS_EXTENT_BUSY_DISCARDED,
			busy_list);
}
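
/*
 * Illustrative sketch, not part of the original file: freeing code typically
 * marks a just-freed extent busy against its own transaction so the space
 * cannot be reused before the free commits to the log.  The helper name and
 * calling context below are assumptions; xfs_extent_busy_insert() is the
 * real entry point.
 */
static inline void
example_mark_freed_extent_busy(
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	/* keep [bno, bno + len) on tp->t_busy until the commit hits the log */
	xfs_extent_busy_insert(tp, pag, bno, len, 0);
}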

/*
 * Search for a busy extent within the range of the extent we are about to
 * allocate.  xfs_extent_busy_search() takes the busy extent tree lock
 * internally, so the caller must not already hold it.  This function returns
 * 0 for no overlapping busy extent, -1 for an overlapping but not exact busy
 * extent, and 1 for an exact match.  A non-zero return thus indicates an
 * overlap that will require a synchronous transaction, while still letting
 * the caller distinguish between a partial and an exact match.  See the
 * worked example after the function.
 */
int
xfs_extent_busy_search(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	struct rb_node		*rbp;
	struct xfs_extent_busy	*busyp;
	int			match = 0;

	/* find closest start bno overlap */
	spin_lock(&pag->pagb_lock);
	rbp = pag->pagb_tree.rb_node;
	while (rbp) {
		busyp = rb_entry(rbp, struct xfs_extent_busy, rb_node);
		if (bno < busyp->bno) {
			/* may overlap, but exact start block is lower */
			if (bno + len > busyp->bno)
				match = -1;
			rbp = rbp->rb_left;
		} else if (bno > busyp->bno) {
			/* may overlap, but exact start block is higher */
			if (bno < busyp->bno + busyp->length)
				match = -1;
			rbp = rbp->rb_right;
		} else {
			/* bno matches busyp, length determines exact match */
			match = (busyp->length == len) ? 1 : -1;
			break;
		}
	}
	spin_unlock(&pag->pagb_lock);
	return match;
}
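
/*
 * Worked example (illustrative, not part of the original file): assume the
 * only busy record in this AG starts at agbno 100 and is 50 blocks long.
 * Then:
 *
 *	xfs_extent_busy_search(mp, pag, 100, 50)  returns  1 (exact match)
 *	xfs_extent_busy_search(mp, pag, 100, 20)  returns -1 (same start,
 *							      different length)
 *	xfs_extent_busy_search(mp, pag, 120, 40)  returns -1 (partial overlap)
 *	xfs_extent_busy_search(mp, pag, 200, 10)  returns  0 (no overlap)
 */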

/*
 * The found free extent [fbno, fend] overlaps part or all of the given busy
 * extent.  If the overlap covers the beginning, the end, or all of the busy
 * extent, the overlapping portion can be made unbusy and used for the
 * allocation.  We can't split a busy extent because we can't modify a
 * transaction/CIL context busy list, but we can update an entry's block
 * number or length.
 *
 * Returns true if the extent can safely be reused, or false if the search
 * needs to be restarted.
 */
STATIC bool
xfs_extent_busy_update_extent(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_extent_busy	*busyp,
	xfs_agblock_t		fbno,
	xfs_extlen_t		flen,
	bool			userdata) __releases(&pag->pagb_lock)
					  __acquires(&pag->pagb_lock)
{
	xfs_agblock_t		fend = fbno + flen;
	xfs_agblock_t		bbno = busyp->bno;
	xfs_agblock_t		bend = bbno + busyp->length;

	/*
	 * This extent is currently being discarded.  Give the thread
	 * performing the discard a chance to mark the extent unbusy
	 * and retry.
	 */
	if (busyp->flags & XFS_EXTENT_BUSY_DISCARDED) {
		spin_unlock(&pag->pagb_lock);
		delay(1);
		spin_lock(&pag->pagb_lock);
		return false;
	}

	/*
	 * If there is a busy extent overlapping a user allocation, we have
	 * no choice but to force the log and retry the search.
	 *
	 * Fortunately this does not happen during normal operation, but
	 * only if the filesystem is very low on space and has to dip into
	 * the AGFL for normal allocations.
	 */
	if (userdata)
		goto out_force_log;

	if (bbno < fbno && bend > fend) {
		/*
		 * Case 1:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *        +---------+
		 *        fbno   fend
		 *
		 * We would have to split the busy extent to be able to track
		 * it correctly, which we cannot do because we would have to
		 * modify the list of busy extents attached to the transaction
		 * or CIL context, which is immutable.
		 *
		 * Force out the log to clear the busy extent and retry the
		 * search.
		 */
		goto out_force_log;
	} else if (bbno >= fbno && bend <= fend) {
		/*
		 * Cases 2-5: the free extent covers the whole busy extent,
		 * whether the two start and/or end on the same block or the
		 * free extent extends past the busy extent on either side:
		 *
		 *        bbno           bend
		 *        +BBBBBBBBBBBBBBBBB+
		 *    +--------------------------+
		 *    fbno                    fend
		 *
		 * The busy extent is fully covered by the extent we are
		 * allocating, and can simply be removed from the rbtree.
		 * However we cannot remove it from the immutable list
		 * tracking busy extents in the transaction or CIL context,
		 * so set the length to zero to mark it invalid.
		 *
		 * We also need to restart the busy extent search from the
		 * tree root, because erasing the node can rearrange the
		 * tree topology.
		 */
		rb_erase(&busyp->rb_node, &pag->pagb_tree);
		busyp->length = 0;
		return false;
	} else if (fend < bend) {
		/*
		 * Cases 6-7: the free extent overlaps the start of the busy
		 * extent, beginning at or before its first block:
		 *
		 *             bbno           bend
		 *             +BBBBBBBBBBBBBBBBB+
		 *    +------------------+
		 *    fbno            fend
		 *
		 * Move the busy extent's start past the reused range.
		 */
		busyp->bno = fend;
		busyp->length = bend - fend;
	} else if (bbno < fbno) {
		/*
		 * Cases 8-9: the free extent overlaps the end of the busy
		 * extent, ending at or past its last block:
		 *
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *        +----------------------+
		 *        fbno                fend
		 *
		 * Truncate the busy extent before the reused range.
		 */
		busyp->length = fbno - busyp->bno;
	} else {
		ASSERT(0);
	}

	trace_xfs_extent_busy_reuse(mp, pag->pag_agno, fbno, flen);
	return true;

out_force_log:
	spin_unlock(&pag->pagb_lock);
	xfs_log_force(mp, XFS_LOG_SYNC);
	trace_xfs_extent_busy_force(mp, pag->pag_agno, fbno, flen);
	spin_lock(&pag->pagb_lock);
	return false;
}
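
/*
 * Worked example (illustrative, not part of the original file): with a busy
 * record covering [100, 150) and a found free extent [140, 170), the
 * "cases 8-9" branch applies: the busy record is truncated to [100, 140) and
 * the caller may reuse [140, 170).  With a free extent [90, 120) the
 * "cases 6-7" branch applies instead, and the busy record becomes [120, 150).
 */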

/*
 * For a given extent [fbno, flen], make sure we can reuse it safely.
 */
void
xfs_extent_busy_reuse(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_agblock_t		fbno,
	xfs_extlen_t		flen,
	bool			userdata)
{
	struct rb_node		*rbp;

	ASSERT(flen > 0);
	spin_lock(&pag->pagb_lock);
restart:
	rbp = pag->pagb_tree.rb_node;
	while (rbp) {
		struct xfs_extent_busy *busyp =
			rb_entry(rbp, struct xfs_extent_busy, rb_node);
		xfs_agblock_t	bbno = busyp->bno;
		xfs_agblock_t	bend = bbno + busyp->length;

		if (fbno + flen <= bbno) {
			rbp = rbp->rb_left;
			continue;
		} else if (fbno >= bend) {
			rbp = rbp->rb_right;
			continue;
		}

		if (!xfs_extent_busy_update_extent(mp, pag, busyp, fbno, flen,
				userdata))
			goto restart;
	}
	spin_unlock(&pag->pagb_lock);
}
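
/*
 * Illustrative sketch, not part of the original file: a caller about to
 * reuse a single block it just pulled off the AGFL would clear any busy
 * state covering that block first.  The block-number variable is assumed
 * context:
 *
 *	xfs_extent_busy_reuse(mp, pag, agfl_bno, 1, false);
 */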

/*
 * For a given extent [fbno, flen], search the busy extent list to find a
 * subset of the extent that is not busy.  If the trimmed length in *len is
 * smaller than args->minlen no suitable extent could be found, and the
 * higher level code needs to force out the log and retry the allocation.
 *
 * Return the current busy generation for the AG if the extent is busy. This
 * value can be used to wait for at least one of the currently busy extents
 * to be cleared. Note that the busy list is not guaranteed to be empty after
 * the gen is woken. The state of a specific extent must always be confirmed
 * with another call to xfs_extent_busy_trim() before it can be used.  See
 * the usage sketch after the function.
 */
bool
xfs_extent_busy_trim(
	struct xfs_alloc_arg	*args,
	xfs_agblock_t		*bno,
	xfs_extlen_t		*len,
	unsigned		*busy_gen)
{
	xfs_agblock_t		fbno;
	xfs_extlen_t		flen;
	struct rb_node		*rbp;
	bool			ret = false;

	ASSERT(*len > 0);

	spin_lock(&args->pag->pagb_lock);
	fbno = *bno;
	flen = *len;
	rbp = args->pag->pagb_tree.rb_node;
	while (rbp && flen >= args->minlen) {
		struct xfs_extent_busy *busyp =
			rb_entry(rbp, struct xfs_extent_busy, rb_node);
		xfs_agblock_t	fend = fbno + flen;
		xfs_agblock_t	bbno = busyp->bno;
		xfs_agblock_t	bend = bbno + busyp->length;

		if (fend <= bbno) {
			rbp = rbp->rb_left;
			continue;
		} else if (fbno >= bend) {
			rbp = rbp->rb_right;
			continue;
		}

		if (bbno <= fbno) {
			/* start overlap */

			/*
			 * Cases 1-4: the busy extent starts at or before the
			 * free extent and reaches at least to fend:
			 *
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *        +---------+
			 *        fbno   fend
			 *
			 * No unbusy region in extent, return failure.
			 */
			if (fend <= bend)
				goto fail;

			/*
			 * Cases 5-6: the free extent continues past bend:
			 *
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *        +----------------------+
			 *        fbno                fend
			 *
			 * Needs to be trimmed to:
			 *                       +-------+
			 *                       fbno fend
			 */
			fbno = bend;
		} else if (bend >= fend) {
			/* end overlap */

			/*
			 * Cases 7-8: the busy extent covers the end of the
			 * free extent:
			 *
			 *             bbno           bend
			 *             +BBBBBBBBBBBBBBBBB+
			 *    +------------------+
			 *    fbno            fend
			 *
			 * Needs to be trimmed to:
			 *    +-------+
			 *    fbno fend
			 */
			fend = bbno;
		} else {
			/* middle overlap */

			/*
			 * Case 9: the busy extent sits in the middle of the
			 * free extent:
			 *
			 *             bbno           bend
			 *             +BBBBBBBBBBBBBBBBB+
			 *    +-----------------------------------+
			 *    fbno                             fend
			 *
			 * Can be trimmed to:
			 *    +-------+        OR         +-------+
			 *    fbno fend                   fbno fend
			 *
			 * Backward allocation leads to significant
			 * fragmentation of directories, which degrades
			 * directory performance, therefore we always want to
			 * choose the option that produces forward allocation
			 * patterns.
			 *
			 * Preferring the lower bno extent will make the next
			 * request use "fend" as the start of the next
			 * allocation; if the segment is no longer busy at
			 * that point, we'll get a contiguous allocation, but
			 * even if it is still busy, we will get a forward
			 * allocation.
			 *
			 * We try to avoid choosing the segment at "bend",
			 * because that can lead to the next allocation
			 * taking the segment at "fbno", which would be a
			 * backward allocation.  We only use the segment at
			 * "fbno" if it is much larger than the current
			 * requested size, because in that case there's a
			 * good chance subsequent allocations will be
			 * contiguous.
			 */
			if (bbno - fbno >= args->maxlen) {
				/* left candidate fits perfectly */
				fend = bbno;
			} else if (fend - bend >= args->maxlen * 4) {
				/* right candidate has enough free space */
				fbno = bend;
			} else if (bbno - fbno >= args->minlen) {
				/* left candidate fits minimum requirement */
				fend = bbno;
			} else {
				goto fail;
			}
		}

		flen = fend - fbno;
	}
out:

	if (fbno != *bno || flen != *len) {
		trace_xfs_extent_busy_trim(args->mp, args->agno, *bno, *len,
				fbno, flen);
		*bno = fbno;
		*len = flen;
		*busy_gen = args->pag->pagb_gen;
		ret = true;
	}
	spin_unlock(&args->pag->pagb_lock);
	return ret;
fail:
	/*
	 * Return a zero extent length as a failure indication.  All callers
	 * re-check if the trimmed extent satisfies the minlen requirement.
	 */
	flen = 0;
	goto out;
}
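
/*
 * Illustrative sketch, not part of the original file: the allocator-side
 * pattern for pairing xfs_extent_busy_trim() with xfs_extent_busy_flush().
 * Everything except the two xfs_extent_busy_* calls is assumed context.
 */
static int
example_trim_or_wait(
	struct xfs_alloc_arg	*args,
	xfs_agblock_t		*fbno,
	xfs_extlen_t		*flen,
	uint32_t		alloc_flags)
{
	unsigned		busy_gen;

	/* shrink the found free extent to its unbusy subset */
	if (xfs_extent_busy_trim(args, fbno, flen, &busy_gen) &&
	    *flen < args->minlen) {
		/*
		 * Nothing usable is left; force the log, wait for at least
		 * one busy extent to clear, and let the caller retry.
		 */
		return xfs_extent_busy_flush(args->tp, args->pag, busy_gen,
				alloc_flags);
	}
	return 0;
}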

STATIC void
xfs_extent_busy_clear_one(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_extent_busy	*busyp)
{
	if (busyp->length) {
		trace_xfs_extent_busy_clear(mp, busyp->agno, busyp->bno,
				busyp->length);
		rb_erase(&busyp->rb_node, &pag->pagb_tree);
	}

	list_del_init(&busyp->list);
	kfree(busyp);
}

static void
xfs_extent_busy_put_pag(
	struct xfs_perag	*pag,
	bool			wakeup)
		__releases(pag->pagb_lock)
{
	if (wakeup) {
		pag->pagb_gen++;
		wake_up_all(&pag->pagb_wait);
	}

	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
}

/*
 * Remove all extents on the passed in list from the busy extents tree.
 * If do_discard is set, skip extents that need to be discarded, and mark
 * these as undergoing a discard operation instead.
 */
void
xfs_extent_busy_clear(
	struct xfs_mount	*mp,
	struct list_head	*list,
	bool			do_discard)
{
	struct xfs_extent_busy	*busyp, *n;
	struct xfs_perag	*pag = NULL;
	xfs_agnumber_t		agno = NULLAGNUMBER;
	bool			wakeup = false;

	list_for_each_entry_safe(busyp, n, list, list) {
		if (busyp->agno != agno) {
			if (pag)
				xfs_extent_busy_put_pag(pag, wakeup);
			agno = busyp->agno;
			pag = xfs_perag_get(mp, agno);
			spin_lock(&pag->pagb_lock);
			wakeup = false;
		}

		if (do_discard && busyp->length &&
		    !(busyp->flags & XFS_EXTENT_BUSY_SKIP_DISCARD)) {
			busyp->flags = XFS_EXTENT_BUSY_DISCARDED;
		} else {
			xfs_extent_busy_clear_one(mp, pag, busyp);
			wakeup = true;
		}
	}

	if (pag)
		xfs_extent_busy_put_pag(pag, wakeup);
}

/*
 * Flush out all busy extents for this AG.
 *
 * If the current transaction is holding busy extents, the caller may not want
 * to wait for committed busy extents to resolve. If we are being told just to
 * try a flush or progress has been made since we last skipped a busy extent,
 * return immediately to allow the caller to try again.
 *
 * If we are freeing extents, we might actually be holding the only free extents
 * in the transaction busy list and the log force won't resolve that situation.
 * In this case, we must return -EAGAIN to avoid a deadlock by informing the
 * caller it needs to commit the busy extents it holds before retrying the
 * extent free operation.
 */
int
xfs_extent_busy_flush(
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	unsigned		busy_gen,
	uint32_t		alloc_flags)
{
	DEFINE_WAIT(wait);
	int			error;

	error = xfs_log_force(tp->t_mountp, XFS_LOG_SYNC);
	if (error)
		return error;

	/* Avoid deadlocks on uncommitted busy extents. */
	if (!list_empty(&tp->t_busy)) {
		if (alloc_flags & XFS_ALLOC_FLAG_TRYFLUSH)
			return 0;

		if (busy_gen != READ_ONCE(pag->pagb_gen))
			return 0;

		if (alloc_flags & XFS_ALLOC_FLAG_FREEING)
			return -EAGAIN;
	}

	/* Wait for committed busy extents to resolve. */
	do {
		prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE);
		if (busy_gen != READ_ONCE(pag->pagb_gen))
			break;
		schedule();
	} while (1);

	finish_wait(&pag->pagb_wait, &wait);
	return 0;
}
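
/*
 * Illustrative sketch, not part of the original file: how the -EAGAIN
 * contract above looks from the extent-free path.  The helper name is an
 * assumption.
 */
static int
example_flush_in_free_path(
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	unsigned		busy_gen)
{
	int			error;

	error = xfs_extent_busy_flush(tp, pag, busy_gen,
			XFS_ALLOC_FLAG_FREEING);
	/*
	 * -EAGAIN means this transaction itself holds the busy extents; the
	 * caller must commit them before retrying the extent free.
	 */
	return error;
}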

void
xfs_extent_busy_wait_all(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	DEFINE_WAIT(wait);
	xfs_agnumber_t		agno;

	for_each_perag(mp, agno, pag) {
		do {
			prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE);
			if (RB_EMPTY_ROOT(&pag->pagb_tree))
				break;
			schedule();
		} while (1);
		finish_wait(&pag->pagb_wait, &wait);
	}
}

/*
 * Callback for list_sort to sort busy extents by the AG they reside in.
 */
int
xfs_extent_busy_ag_cmp(
	void			*priv,
	const struct list_head	*l1,
	const struct list_head	*l2)
{
	struct xfs_extent_busy	*b1 =
		container_of(l1, struct xfs_extent_busy, list);
	struct xfs_extent_busy	*b2 =
		container_of(l2, struct xfs_extent_busy, list);
	s32 diff;

	diff = b1->agno - b2->agno;
	if (!diff)
		diff = b1->bno - b2->bno;
	return diff;
}
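
/*
 * Illustrative sketch, not part of the original file: discard code would
 * sort a private busy list by AG before walking it, so each AG's pagb_lock
 * only needs to be taken once per pass.  The list variable is assumed
 * context:
 *
 *	list_sort(NULL, &busy_list, xfs_extent_busy_ag_cmp);
 */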

/* Are there any busy extents in this AG? */
bool
xfs_extent_busy_list_empty(
	struct xfs_perag	*pag)
{
	bool			res;

	spin_lock(&pag->pagb_lock);
	res = RB_EMPTY_ROOT(&pag->pagb_tree);
	spin_unlock(&pag->pagb_lock);
	return res;
}
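
/*
 * Illustrative sketch, not part of the original file: teardown code that
 * must not proceed while any extent is still busy could combine the two
 * helpers above:
 *
 *	if (!xfs_extent_busy_list_empty(pag))
 *		xfs_extent_busy_wait_all(mp);
 */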