// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * Copyright (C) 2010 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_defer.h"

kmem_zone_t	*xfs_trans_zone;

#if defined(CONFIG_TRACEPOINTS)
STATIC void
xfs_trans_trace_reservations(
	struct xfs_mount	*mp)
{
	struct xfs_trans_res	resv;
	struct xfs_trans_res	*res;
	struct xfs_trans_res	*end_res;
	int			i;

	res = (struct xfs_trans_res *)M_RES(mp);
	end_res = (struct xfs_trans_res *)(M_RES(mp) + 1);
	for (i = 0; res < end_res; i++, res++)
		trace_xfs_trans_resv_calc(mp, i, res);
	xfs_log_get_max_trans_res(mp, &resv);
	trace_xfs_trans_resv_calc(mp, -1, &resv);
}
#else
# define xfs_trans_trace_reservations(mp)
#endif

/*
 * Initialize the precomputed transaction reservation values
 * in the mount structure.
 */
void
xfs_trans_init(
	struct xfs_mount	*mp)
{
	xfs_trans_resv_calc(mp, M_RES(mp));
	xfs_trans_trace_reservations(mp);
}

/*
 * Free the transaction structure.  If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
	struct xfs_trans	*tp)
{
	xfs_extent_busy_sort(&tp->t_busy);
	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);

	trace_xfs_trans_free(tp, _RET_IP_);
	atomic_dec(&tp->t_mountp->m_active_trans);
	if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
		sb_end_intwrite(tp->t_mountp->m_super);
	xfs_trans_free_dqinfo(tp);
	kmem_cache_free(xfs_trans_zone, tp);
}

/*
 * This is called to create a new transaction which will share the
 * permanent log reservation of the given transaction.  The remaining
 * unused block and rt extent reservations are also inherited.  This
 * implies that the original transaction is no longer allowed to allocate
 * blocks.  Locks and log items, however, are not inherited.  They must
 * be added to the new transaction explicitly.
 */
STATIC struct xfs_trans *
xfs_trans_dup(
	struct xfs_trans	*tp)
{
	struct xfs_trans	*ntp;

	trace_xfs_trans_dup(tp, _RET_IP_);

	ntp = kmem_zone_zalloc(xfs_trans_zone, 0);

	/*
	 * Initialize the new transaction structure.
	 */
	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
	ntp->t_mountp = tp->t_mountp;
	INIT_LIST_HEAD(&ntp->t_items);
	INIT_LIST_HEAD(&ntp->t_busy);
	INIT_LIST_HEAD(&ntp->t_dfops);
	ntp->t_firstblock = NULLFSBLOCK;

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(tp->t_ticket != NULL);

	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
		       (tp->t_flags & XFS_TRANS_RESERVE) |
		       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT) |
		       (tp->t_flags & XFS_TRANS_RES_FDBLKS);
	/* We gave our writer reference to the new transaction */
	tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);

	ASSERT(tp->t_blk_res >= tp->t_blk_res_used);
	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
	tp->t_blk_res = tp->t_blk_res_used;

	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
	tp->t_rtx_res = tp->t_rtx_res_used;
	ntp->t_pflags = tp->t_pflags;

	/* move deferred ops over to the new tp */
	xfs_defer_move(ntp, tp);

	xfs_trans_dup_dqinfo(tp, ntp);

	atomic_inc(&tp->t_mountp->m_active_trans);
	return ntp;
}
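
/*
 * Note: xfs_trans_dup() is not called directly by users of the
 * transaction subsystem.  Its canonical caller is xfs_trans_roll() at
 * the bottom of this file, which pairs the dup with a commit of the
 * old transaction.
 */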

/*
 * This is called to reserve free disk blocks and log space for the
 * given transaction.  This must be done before allocating any resources
 * within the transaction.
 *
 * This will return ENOSPC if there are not enough blocks available.
 * It will sleep waiting for available log space.
 * The only valid value for the flags parameter is XFS_RES_LOG_PERM, which
 * is used by long running transactions.  If any one of the reservations
 * fails then they will all be backed out.
 *
 * This does not do quota reservations. That typically is done by the
 * caller afterwards.
 */
static int
xfs_trans_reserve(
	struct xfs_trans	*tp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents)
{
	int			error = 0;
	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	/* Mark this thread as being in a transaction */
	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);

	/*
	 * Attempt to reserve the needed disk blocks by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (blocks > 0) {
		error = xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
		if (error != 0) {
			current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
			return -ENOSPC;
		}
		tp->t_blk_res += blocks;
	}

	/*
	 * Reserve the log space needed for this transaction.
	 */
	if (resp->tr_logres > 0) {
		bool	permanent = false;

		ASSERT(tp->t_log_res == 0 ||
		       tp->t_log_res == resp->tr_logres);
		ASSERT(tp->t_log_count == 0 ||
		       tp->t_log_count == resp->tr_logcount);

		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
			permanent = true;
		} else {
			ASSERT(tp->t_ticket == NULL);
			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
		}

		if (tp->t_ticket != NULL) {
			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
			error = xfs_log_regrant(tp->t_mountp, tp->t_ticket);
		} else {
			error = xfs_log_reserve(tp->t_mountp,
						resp->tr_logres,
						resp->tr_logcount,
						&tp->t_ticket, XFS_TRANSACTION,
						permanent);
		}

		if (error)
			goto undo_blocks;

		tp->t_log_res = resp->tr_logres;
		tp->t_log_count = resp->tr_logcount;
	}

	/*
	 * Attempt to reserve the needed realtime extents by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (rtextents > 0) {
		error = xfs_mod_frextents(tp->t_mountp, -((int64_t)rtextents));
		if (error) {
			error = -ENOSPC;
			goto undo_log;
		}
		tp->t_rtx_res += rtextents;
	}

	return 0;

	/*
	 * Error cases jump to one of these labels to undo any
	 * reservations which have already been performed.
	 */
undo_log:
	if (resp->tr_logres > 0) {
		xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, false);
		tp->t_ticket = NULL;
		tp->t_log_res = 0;
		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
	}

undo_blocks:
	if (blocks > 0) {
		xfs_mod_fdblocks(tp->t_mountp, (int64_t)blocks, rsvd);
		tp->t_blk_res = 0;
	}

	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);

	return error;
}
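
/*
 * Note: xfs_trans_reserve() is static and is only reached through
 * xfs_trans_alloc() and xfs_trans_roll() below; on failure, every
 * reservation taken above has already been backed out via the undo
 * labels, so callers see an all-or-nothing result.
 */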

int
xfs_trans_alloc(
	struct xfs_mount	*mp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents,
	uint			flags,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	int			error;

	/*
	 * Allocate the handle before we do our freeze accounting and setting up
	 * GFP_NOFS allocation context so that we avoid lockdep false positives
	 * by doing GFP_KERNEL allocations inside sb_start_intwrite().
	 */
	tp = kmem_zone_zalloc(xfs_trans_zone, 0);
	if (!(flags & XFS_TRANS_NO_WRITECOUNT))
		sb_start_intwrite(mp->m_super);

	/*
	 * Zero-reservation ("empty") transactions can't modify anything, so
	 * they're allowed to run while we're frozen.
	 */
	WARN_ON(resp->tr_logres > 0 &&
		mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
	ASSERT(!(flags & XFS_TRANS_RES_FDBLKS) ||
	       xfs_sb_version_haslazysbcount(&mp->m_sb));
	atomic_inc(&mp->m_active_trans);

	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
	tp->t_flags = flags;
	tp->t_mountp = mp;
	INIT_LIST_HEAD(&tp->t_items);
	INIT_LIST_HEAD(&tp->t_busy);
	INIT_LIST_HEAD(&tp->t_dfops);
	tp->t_firstblock = NULLFSBLOCK;

	error = xfs_trans_reserve(tp, resp, blocks, rtextents);
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	trace_xfs_trans_alloc(tp, _RET_IP_);

	*tpp = tp;
	return 0;
}
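
/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): the usual allocate/modify/commit pattern built on
 * xfs_trans_alloc().  The tr_write reservation and the block count are
 * arbitrary example choices.
 */
static inline int
xfs_example_alloc_and_commit(
	struct xfs_mount	*mp,
	uint			blocks)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, blocks, 0, 0, &tp);
	if (error)
		return error;	/* e.g. -ENOSPC; tp was already cancelled */

	/* ... join items to tp and dirty them here ... */

	return xfs_trans_commit(tp);	/* frees tp on success or failure */
}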

/*
 * Create an empty transaction with no reservation.  This is a defensive
 * mechanism for routines that query metadata without actually modifying
 * them -- if the metadata being queried is somehow cross-linked (think a
 * btree block pointer that points higher in the tree), we risk deadlock.
 * However, blocks grabbed as part of a transaction can be re-grabbed.
 * The verifiers will notice the corrupt block and the operation will fail
 * back to userspace without deadlocking.
 *
 * Note the zero-length reservation; this transaction MUST be cancelled
 * without any dirty data.
 *
 * Callers should obtain freeze protection to avoid two conflicts with fs
 * freezing: (1) having active transactions trip the m_active_trans ASSERTs;
 * and (2) grabbing buffers at the same time that freeze is trying to drain
 * the buffer LRU list.
 */
int
xfs_trans_alloc_empty(
	struct xfs_mount	*mp,
	struct xfs_trans	**tpp)
{
	struct xfs_trans_res	resv = {0};

	return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
}
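
/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): a read-only metadata scan using an empty transaction.  Because
 * nothing may be dirtied, the transaction is always cancelled.
 */
static inline int
xfs_example_query_metadata(
	struct xfs_mount	*mp)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc_empty(mp, &tp);
	if (error)
		return error;

	/* ... read-only lookups that may grab buffers via tp ... */

	xfs_trans_cancel(tp);	/* MUST cancel; no dirty data allowed */
	return 0;
}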

/*
 * Record the indicated change to the given field for application
 * to the file system's superblock when the transaction commits.
 * For now, just store the change in the transaction structure.
 *
 * Mark the transaction structure to indicate that the superblock
 * needs to be updated before committing.
 *
 * Because we may not be keeping track of allocated/free inodes and
 * used filesystem blocks in the superblock, we do not mark the
 * superblock dirty in this transaction if we modify these fields.
 * We still need to update the transaction deltas so that they get
 * applied to the incore superblock, but we don't want them to
 * cause the superblock to get locked and logged if these are the
 * only fields in the superblock that the transaction modifies.
 */
void
xfs_trans_mod_sb(
	xfs_trans_t	*tp,
	uint		field,
	int64_t		delta)
{
	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
	xfs_mount_t	*mp = tp->t_mountp;

	switch (field) {
	case XFS_TRANS_SB_ICOUNT:
		tp->t_icount_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_IFREE:
		tp->t_ifree_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FDBLOCKS:
		/*
		 * Track the number of blocks allocated in the transaction.
		 * Make sure it does not exceed the number reserved. If so,
		 * shutdown as this can lead to accounting inconsistency.
		 */
		if (delta < 0) {
			tp->t_blk_res_used += (uint)-delta;
			if (tp->t_blk_res_used > tp->t_blk_res)
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		} else if (delta > 0 && (tp->t_flags & XFS_TRANS_RES_FDBLKS)) {
			int64_t	blkres_delta;

			/*
			 * Return freed blocks directly to the reservation
			 * instead of the global pool, being careful not to
			 * overflow the trans counter. This is used to preserve
			 * reservation across chains of transaction rolls that
			 * repeatedly free and allocate blocks.
			 */
			blkres_delta = min_t(int64_t, delta,
					     UINT_MAX - tp->t_blk_res);
			tp->t_blk_res += blkres_delta;
			delta -= blkres_delta;
		}
		tp->t_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_RES_FDBLOCKS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		tp->t_res_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FREXTENTS:
		/*
		 * Track the number of blocks allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_rtx_res_used += (uint)-delta;
			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
		}
		tp->t_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_RES_FREXTENTS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_DBLOCKS:
		ASSERT(delta > 0);
		tp->t_dblocks_delta += delta;
		break;
	case XFS_TRANS_SB_AGCOUNT:
		ASSERT(delta > 0);
		tp->t_agcount_delta += delta;
		break;
	case XFS_TRANS_SB_IMAXPCT:
		tp->t_imaxpct_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSIZE:
		tp->t_rextsize_delta += delta;
		break;
	case XFS_TRANS_SB_RBMBLOCKS:
		tp->t_rbmblocks_delta += delta;
		break;
	case XFS_TRANS_SB_RBLOCKS:
		tp->t_rblocks_delta += delta;
		break;
	case XFS_TRANS_SB_REXTENTS:
		tp->t_rextents_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSLOG:
		tp->t_rextslog_delta += delta;
		break;
	default:
		ASSERT(0);
		return;
	}

	tp->t_flags |= flags;
}
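
/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): a transaction that allocates two blocks out of its reservation
 * reports them as a negative XFS_TRANS_SB_FDBLOCKS delta, which the
 * code above charges to t_blk_res_used.
 */
static inline void
xfs_example_charge_two_blocks(
	struct xfs_trans	*tp)
{
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -2);
}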

/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
	xfs_trans_t	*tp)
{
	xfs_dsb_t	*sbp;
	xfs_buf_t	*bp;
	int		whole = 0;

	bp = xfs_trans_getsb(tp, tp->t_mountp);
	sbp = XFS_BUF_TO_SBP(bp);

	/*
	 * Check that superblock mods match the mods made to AGF counters.
	 */
	ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
	       (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
		tp->t_ag_btree_delta));

	/*
	 * Only update the superblock counters if we are logging them
	 */
	if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
		if (tp->t_icount_delta)
			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
		if (tp->t_ifree_delta)
			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
		if (tp->t_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
		if (tp->t_res_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
	}

	if (tp->t_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
	if (tp->t_res_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);

	if (tp->t_dblocks_delta) {
		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
		whole = 1;
	}
	if (tp->t_agcount_delta) {
		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
		whole = 1;
	}
	if (tp->t_imaxpct_delta) {
		sbp->sb_imax_pct += tp->t_imaxpct_delta;
		whole = 1;
	}
	if (tp->t_rextsize_delta) {
		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
		whole = 1;
	}
	if (tp->t_rbmblocks_delta) {
		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
		whole = 1;
	}
	if (tp->t_rblocks_delta) {
		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
		whole = 1;
	}
	if (tp->t_rextents_delta) {
		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
		whole = 1;
	}
	if (tp->t_rextslog_delta) {
		sbp->sb_rextslog += tp->t_rextslog_delta;
		whole = 1;
	}

	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
	if (whole)
		/*
		 * Log the whole thing, the fields are noncontiguous.
		 */
		xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
	else
		/*
		 * Since all the modifiable fields are contiguous, we
		 * can get away with this.
		 */
		xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
				  offsetof(xfs_dsb_t, sb_frextents) +
				  sizeof(sbp->sb_frextents) - 1);
}

/*
 * xfs_trans_unreserve_and_mod_sb() is called to release unused
 * reservations and apply superblock counter changes to the in-core
 * superblock.  The t_res_fdblocks_delta and t_res_frextents_delta fields
 * are explicitly NOT applied to the in-core superblock.  The idea is that
 * it has already been done.
 *
 * If we are not logging superblock counters, then the inode allocated/free
 * and used block counts are not updated in the on disk superblock. In this
 * case, XFS_TRANS_SB_DIRTY will not be set when the transaction is updated
 * but we still need to update the incore superblock with the changes.
 */
void
xfs_trans_unreserve_and_mod_sb(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
	int64_t			blkdelta = 0;
	int64_t			rtxdelta = 0;
	int64_t			idelta = 0;
	int64_t			ifreedelta = 0;
	int			error;

	/* calculate deltas */
	if (tp->t_blk_res > 0)
		blkdelta = tp->t_blk_res;
	if ((tp->t_fdblocks_delta != 0) &&
	    (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
		blkdelta += tp->t_fdblocks_delta;

	if (tp->t_rtx_res > 0)
		rtxdelta = tp->t_rtx_res;
	if ((tp->t_frextents_delta != 0) &&
	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
		rtxdelta += tp->t_frextents_delta;

	if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	    (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
		idelta = tp->t_icount_delta;
		ifreedelta = tp->t_ifree_delta;
	}

	/* apply the per-cpu counters */
	if (blkdelta) {
		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
		ASSERT(!error);
	}

	if (idelta) {
		error = xfs_mod_icount(mp, idelta);
		ASSERT(!error);
	}

	if (ifreedelta) {
		error = xfs_mod_ifree(mp, ifreedelta);
		ASSERT(!error);
	}

	if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
		return;

	/* apply remaining deltas */
	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_frextents += rtxdelta;
	mp->m_sb.sb_dblocks += tp->t_dblocks_delta;
	mp->m_sb.sb_agcount += tp->t_agcount_delta;
	mp->m_sb.sb_imax_pct += tp->t_imaxpct_delta;
	mp->m_sb.sb_rextsize += tp->t_rextsize_delta;
	mp->m_sb.sb_rbmblocks += tp->t_rbmblocks_delta;
	mp->m_sb.sb_rblocks += tp->t_rblocks_delta;
	mp->m_sb.sb_rextents += tp->t_rextents_delta;
	mp->m_sb.sb_rextslog += tp->t_rextslog_delta;
	spin_unlock(&mp->m_sb_lock);

	/*
	 * Debug checks outside of the spinlock so they don't lock up the
	 * machine if they fail.
	 */
	ASSERT(mp->m_sb.sb_imax_pct >= 0);
	ASSERT(mp->m_sb.sb_rextslog >= 0);
}

/* Add the given log item to the transaction's list of log items. */
void
xfs_trans_add_item(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	ASSERT(lip->li_mountp == tp->t_mountp);
	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
	ASSERT(list_empty(&lip->li_trans));
	ASSERT(!test_bit(XFS_LI_DIRTY, &lip->li_flags));

	list_add_tail(&lip->li_trans, &tp->t_items);
	trace_xfs_trans_add_item(tp, _RET_IP_);
}
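
/*
 * Note: most code does not call xfs_trans_add_item() directly; the
 * type-specific join helpers (e.g. xfs_trans_ijoin() for inodes and
 * xfs_trans_bjoin() for buffers) attach the log item and call this
 * internally.
 */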

/*
 * Unlink the log item from the transaction.  The log item is no longer
 * considered dirty in this transaction, as the linked transaction has
 * finished, either by abort or commit completion.
 */
void
xfs_trans_del_item(
	struct xfs_log_item	*lip)
{
	clear_bit(XFS_LI_DIRTY, &lip->li_flags);
	list_del_init(&lip->li_trans);
}

/* Detach and unlock all of the items in a transaction */
static void
xfs_trans_free_items(
	struct xfs_trans	*tp,
	bool			abort)
{
	struct xfs_log_item	*lip, *next;

	trace_xfs_trans_free_items(tp, _RET_IP_);

	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		xfs_trans_del_item(lip);
		if (abort)
			set_bit(XFS_LI_ABORTED, &lip->li_flags);
		if (lip->li_ops->iop_release)
			lip->li_ops->iop_release(lip);
	}
}

static inline void
xfs_log_item_batch_insert(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		commit_lsn)
{
	int			i;

	spin_lock(&ailp->ail_lock);
	/* xfs_trans_ail_update_bulk drops ailp->ail_lock */
	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];

		if (lip->li_ops->iop_unpin)
			lip->li_ops->iop_unpin(lip, 0);
	}
}

/*
 * Bulk operation version of xfs_trans_committed that takes a log vector of
 * items to insert into the AIL. This uses bulk AIL insertion techniques to
 * minimise lock traffic.
 *
 * If we are called with the aborted flag set, it is because a log write during
 * a CIL checkpoint commit has failed. In this case, all the items in the
 * checkpoint have already gone through iop_committed and iop_committing, which
 * means that checkpoint commit abort handling is treated exactly the same
 * as an iclog write error even though we haven't started any IO yet. Hence in
 * this case all we need to do is iop_committed processing, followed by an
 * iop_unpin(aborted) call.
 *
 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 * at the end of the AIL, the insert cursor avoids the need to walk
 * the AIL to find the insertion point on every xfs_log_item_batch_insert()
 * call. This saves a lot of needless list walking and is a net win, even
 * though it slightly increases the amount of AIL lock traffic to set it up
 * and tear it down.
 */
void
xfs_trans_committed_bulk(
	struct xfs_ail		*ailp,
	struct xfs_log_vec	*log_vector,
	xfs_lsn_t		commit_lsn,
	bool			aborted)
{
#define LOG_ITEM_BATCH_SIZE	32
	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
	struct xfs_log_vec	*lv;
	struct xfs_ail_cursor	cur;
	int			i = 0;

	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
	spin_unlock(&ailp->ail_lock);

	/* unpin all the log items */
	for (lv = log_vector; lv; lv = lv->lv_next) {
		struct xfs_log_item	*lip = lv->lv_item;
		xfs_lsn_t		item_lsn;

		if (aborted)
			set_bit(XFS_LI_ABORTED, &lip->li_flags);

		if (lip->li_ops->flags & XFS_ITEM_RELEASE_WHEN_COMMITTED) {
			lip->li_ops->iop_release(lip);
			continue;
		}

		if (lip->li_ops->iop_committed)
			item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);
		else
			item_lsn = commit_lsn;

		/* item_lsn of -1 means the item needs no further processing */
		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
			continue;

		/*
		 * if we are aborting the operation, no point in inserting the
		 * object into the AIL as we are in a shutdown situation.
		 */
		if (aborted) {
			ASSERT(XFS_FORCED_SHUTDOWN(ailp->ail_mount));
			if (lip->li_ops->iop_unpin)
				lip->li_ops->iop_unpin(lip, 1);
			continue;
		}

		if (item_lsn != commit_lsn) {

			/*
			 * Not a bulk update option due to unusual item_lsn.
			 * Push into AIL immediately, rechecking the lsn once
			 * we have the ail lock. Then unpin the item. This does
			 * not affect the AIL cursor the bulk insert path is
			 * using.
			 */
			spin_lock(&ailp->ail_lock);
			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
				xfs_trans_ail_update(ailp, lip, item_lsn);
			else
				spin_unlock(&ailp->ail_lock);
			if (lip->li_ops->iop_unpin)
				lip->li_ops->iop_unpin(lip, 0);
			continue;
		}

		/* Item is a candidate for bulk AIL insert.  */
		log_items[i++] = lv->lv_item;
		if (i >= LOG_ITEM_BATCH_SIZE) {
			xfs_log_item_batch_insert(ailp, &cur, log_items,
					LOG_ITEM_BATCH_SIZE, commit_lsn);
			i = 0;
		}
	}

	/* make sure we insert the remainder! */
	if (i)
		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);

	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);
}

/*
 * Commit the given transaction to the log.
 *
 * XFS disk error handling mechanism is not based on a typical
 * transaction abort mechanism. Logically after the filesystem
 * gets marked 'SHUTDOWN', we can't let any new transactions
 * be durable - ie. committed to disk - because some metadata might
 * be inconsistent. In such cases, this returns an error, and the
 * caller may assume that all locked objects joined to the transaction
 * have already been unlocked as if the commit had succeeded.
 * Do not reference the transaction structure after this call.
 */
static int
__xfs_trans_commit(
	struct xfs_trans	*tp,
	bool			regrant)
{
	struct xfs_mount	*mp = tp->t_mountp;
	xfs_lsn_t		commit_lsn = -1;
	int			error = 0;
	int			sync = tp->t_flags & XFS_TRANS_SYNC;

	trace_xfs_trans_commit(tp, _RET_IP_);

	/*
	 * Finish deferred items on final commit. Only permanent transactions
	 * should ever have deferred ops.
	 */
	WARN_ON_ONCE(!list_empty(&tp->t_dfops) &&
		     !(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
	if (!regrant && (tp->t_flags & XFS_TRANS_PERM_LOG_RES)) {
		error = xfs_defer_finish_noroll(&tp);
		if (error)
			goto out_unreserve;
	}

	/*
	 * If there is nothing to be logged by the transaction,
	 * then unlock all of the items associated with the
	 * transaction and free the transaction structure.
	 * Also make sure to return any reserved blocks to
	 * the free pool.
	 */
	if (!(tp->t_flags & XFS_TRANS_DIRTY))
		goto out_unreserve;

	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = -EIO;
		goto out_unreserve;
	}

	ASSERT(tp->t_ticket != NULL);

	/*
	 * If we need to update the superblock, then do it now.
	 */
	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
		xfs_trans_apply_sb_deltas(tp);
	xfs_trans_apply_dquot_deltas(tp);

	xfs_log_commit_cil(mp, tp, &commit_lsn, regrant);

	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	xfs_trans_free(tp);

	/*
	 * If the transaction needs to be synchronous, then force the
	 * log out now and wait for it.
	 */
	if (sync) {
		error = xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
		XFS_STATS_INC(mp, xs_trans_sync);
	} else {
		XFS_STATS_INC(mp, xs_trans_async);
	}

	return error;

out_unreserve:
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * It is indeed possible for the transaction to be not dirty but
	 * the dqinfo portion to be.  All that means is that we have some
	 * (non-persistent) quota reservations that need to be unreserved.
	 */
	xfs_trans_unreserve_and_mod_dquots(tp);
	if (tp->t_ticket) {
		commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, regrant);
		if (commit_lsn == -1 && !error)
			error = -EIO;
		tp->t_ticket = NULL;
	}
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	xfs_trans_free_items(tp, !!error);
	xfs_trans_free(tp);

	XFS_STATS_INC(mp, xs_trans_empty);
	return error;
}

int
xfs_trans_commit(
	struct xfs_trans	*tp)
{
	return __xfs_trans_commit(tp, false);
}

/*
 * Unlock all of the transaction's items and free the transaction.
 * The transaction must not have modified any of its items, because
 * there is no way to restore them to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release
 * it as well.
 */
void
xfs_trans_cancel(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	bool			dirty = (tp->t_flags & XFS_TRANS_DIRTY);

	trace_xfs_trans_cancel(tp, _RET_IP_);

	if (tp->t_flags & XFS_TRANS_PERM_LOG_RES)
		xfs_defer_cancel(tp);

	/*
	 * See if the caller is relying on us to shut down the
	 * filesystem.  This happens in paths where we detect
	 * corruption and decide to give up.
	 */
	if (dirty && !XFS_FORCED_SHUTDOWN(mp)) {
		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}
#ifdef DEBUG
	if (!dirty && !XFS_FORCED_SHUTDOWN(mp)) {
		struct xfs_log_item *lip;

		list_for_each_entry(lip, &tp->t_items, li_trans)
			ASSERT(!(lip->li_type == XFS_LI_EFD));
	}
#endif
	xfs_trans_unreserve_and_mod_sb(tp);
	xfs_trans_unreserve_and_mod_dquots(tp);

	if (tp->t_ticket) {
		xfs_log_done(mp, tp->t_ticket, NULL, false);
		tp->t_ticket = NULL;
	}

	/* mark this thread as no longer being in a transaction */
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);

	xfs_trans_free_items(tp, dirty);
	xfs_trans_free(tp);
}
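
/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): the standard error path -- a transaction that fails during
 * setup, before dirtying anything, is simply cancelled.  The
 * tr_itruncate reservation and the returned errno are arbitrary
 * example choices.
 */
static inline int
xfs_example_cancel_on_error(
	struct xfs_mount	*mp)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error)
		return error;

	/* ... assume a lookup step fails here before anything is dirtied ... */

	xfs_trans_cancel(tp);	/* releases the log and block reservations */
	return -EAGAIN;		/* stand-in for the failing step's error */
}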

/*
 * Roll from one trans in the sequence of PERMANENT transactions to
 * the next: permanent transactions are only flushed out when
 * committed with xfs_trans_commit(), but we still want to let chunks
 * of it go to the log as soon as possible. So we commit the chunk
 * we've been working on and get a new transaction to continue.
 */
int
xfs_trans_roll(
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*trans = *tpp;
	struct xfs_trans_res	tres;
	int			error;

	trace_xfs_trans_roll(trans, _RET_IP_);

	/*
	 * Copy the critical parameters from one trans to the next.
	 */
	tres.tr_logres = trans->t_log_res;
	tres.tr_logcount = trans->t_log_count;

	*tpp = xfs_trans_dup(trans);

	/*
	 * Commit the current transaction.
	 * If this commit failed, then it'd just unlock those items that
	 * are not marked ihold. That also means that a filesystem shutdown
	 * is in progress. The caller takes the responsibility to cancel
	 * the duplicate transaction that gets returned.
	 */
	error = __xfs_trans_commit(trans, true);
	if (error)
		return error;

	/*
	 * Reserve space in the log for the next transaction.
	 * This also pushes items in the "AIL", the list of logged items,
	 * out to disk if they are taking up space at the tail of the log
	 * that we want to use. This requires that either nothing be locked
	 * across this call, or that anything that is locked be logged in
	 * the prior and the next transactions.
	 */
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	return xfs_trans_reserve(*tpp, &tres, 0, 0);
}
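
/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): a long-running operation rolling its permanent transaction
 * between bounded steps so that log space is released incrementally.
 * Per the xfs_trans_dup() comment above, joined items are not inherited,
 * so the caller must re-join and re-log them after every roll.
 */
static inline int
xfs_example_roll_between_steps(
	struct xfs_trans	**tpp,
	int			nr_steps)
{
	int			error = 0;

	while (nr_steps-- > 0 && !error) {
		/* ... one bounded unit of work against *tpp ... */
		error = xfs_trans_roll(tpp);
	}
	return error;
}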