// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"
struct workqueue_struct *xfs_discard_wq;
/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
        struct xlog             *log)
{
        struct xlog_ticket      *tic;

        tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0);

        /*
         * set the current reservation to zero so we know to steal the basic
         * transaction overhead reservation from the first transaction commit.
         */
        tic->t_curr_res = 0;
        return tic;
}
/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push. This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
        struct xlog     *log)
{
        log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
        log->l_cilp->xc_ctx->sequence = 1;
}
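
/*
 * Space needed for the log vector header plus an array of niovecs iovecs,
 * rounded up to a 64-bit boundary so that the data buffer that follows it
 * starts naturally aligned.
 */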
static inline int
xlog_cil_iovec_space(
        uint    niovecs)
{
        return round_up((sizeof(struct xfs_log_vec) +
                        niovecs * sizeof(struct xfs_log_iovec)),
                        sizeof(uint64_t));
}
/*
 * Allocate or pin log vector buffers for CIL insertion.
 *
 * The CIL currently uses disposable buffers for copying a snapshot of the
 * modified items into the log during a push. The biggest problem with this is
 * the requirement to allocate the disposable buffer during the commit if:
 *	a) it does not exist; or
 *	b) it is too small.
 *
 * If we do this allocation within xlog_cil_insert_format_items(), it is done
 * under the xc_ctx_lock, which means that a CIL push cannot occur during
 * the memory allocation. This means that we have a potential deadlock situation
 * under low memory conditions when we have lots of dirty metadata pinned in
 * the CIL and we need a CIL commit to occur to free memory.
 *
 * To avoid this, we need to move the memory allocation outside the
 * xc_ctx_lock, but because the log vector buffers are disposable, that opens
 * up a TOCTOU race condition w.r.t. the CIL committing and removing the log
 * vector buffers between the check and the formatting of the item into the
 * log vector buffer within the xc_ctx_lock.
 *
 * Because the log vector buffer needs to be unchanged during the CIL push
 * process, we cannot share the buffer between the transaction commit (which
 * modifies the buffer) and the CIL push context that is writing the changes
 * into the log. This means skipping preallocation of buffer space is
 * unreliable, but we most definitely do not want to be allocating and freeing
 * buffers unnecessarily during commits when overwrites can be done safely.
 *
 * The simplest solution to this problem is to allocate a shadow buffer when a
 * log item is committed for the second time, and then to only use this buffer
 * if necessary. The buffer can remain attached to the log item until such time
 * as it is needed, and this is the buffer that is reallocated to match the
 * size of the incoming modification. Then during the formatting of the item we
 * can swap the active buffer with the new one if we can't reuse the existing
 * buffer. We don't free the old buffer as it may be reused on the next
 * modification if its size is right, otherwise we'll free and reallocate it at
 * that point.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and attaches the vector to the log item in preparation
 * for the formatting step which occurs under the xc_ctx_lock.
 *
 * While this means the memory footprint goes up, it avoids the repeated
 * alloc/free pattern that repeated modifications of an item would otherwise
 * cause, and hence minimises the CPU overhead of such behaviour.
 */
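/*
 * The resulting buffer layout, reconstructed here from the pointer arithmetic
 * below for illustration:
 *
 *	+----------------------------+ <- lv
 *	| struct xfs_log_vec         |
 *	+----------------------------+ <- lv->lv_iovecp = &lv[1]
 *	| niovecs iovec headers      |
 *	+----------------------------+ <- lv->lv_buf (64-bit aligned)
 *	| formatted region data      |
 *	+----------------------------+
 */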
static void
xlog_cil_alloc_shadow_bufs(
        struct xlog             *log,
        struct xfs_trans        *tp)
{
        struct xfs_log_item     *lip;

        list_for_each_entry(lip, &tp->t_items, li_trans) {
                struct xfs_log_vec *lv;
                int     niovecs = 0;
                int     nbytes = 0;
                int     buf_size;
                bool    ordered = false;

                /* Skip items which aren't dirty in this transaction. */
                if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
                        continue;

                /* get number of vecs and size of data to be stored */
                lip->li_ops->iop_size(lip, &niovecs, &nbytes);

                /*
                 * Ordered items need to be tracked but we do not wish to write
                 * them. We need a logvec to track the object, but we do not
                 * need an iovec or buffer to be allocated for copying data.
                 */
                if (niovecs == XFS_LOG_VEC_ORDERED) {
                        ordered = true;
                        niovecs = 0;
                        nbytes = 0;
                }

                /*
                 * We 64-bit align the length of each iovec so that the start
                 * of the next one is naturally aligned. We'll need to
                 * account for that slack space here. Then round nbytes up
                 * to 64-bit alignment so that the initial buffer alignment is
                 * easy to calculate and verify.
                 */
                nbytes += niovecs * sizeof(uint64_t);
                nbytes = round_up(nbytes, sizeof(uint64_t));

                /*
                 * The data buffer needs to start 64-bit aligned, so round up
                 * that space to ensure we can align it appropriately and not
                 * overrun the buffer.
                 */
                buf_size = nbytes + xlog_cil_iovec_space(niovecs);

                /*
                 * if we have no shadow buffer, or it is too small, we need to
                 * reallocate it.
                 */
                if (!lip->li_lv_shadow ||
                    buf_size > lip->li_lv_shadow->lv_size) {
                        /*
                         * We free and allocate here as a realloc would copy
                         * unnecessary data. We don't use kmem_zalloc() for the
                         * same reason - we don't need to zero the data area in
                         * the buffer, only the log vector header and the iovec
                         * storage.
                         */
                        kmem_free(lip->li_lv_shadow);

                        lv = kmem_alloc_large(buf_size, KM_NOFS);
                        memset(lv, 0, xlog_cil_iovec_space(niovecs));

                        lv->lv_item = lip;
                        lv->lv_size = buf_size;
                        if (ordered)
                                lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
                        else
                                lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
                        lip->li_lv_shadow = lv;
                } else {
                        /* same or smaller, optimise common overwrite case */
                        lv = lip->li_lv_shadow;
                        if (ordered)
                                lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
                        else
                                lv->lv_buf_len = 0;
                        lv->lv_bytes = 0;
                        lv->lv_next = NULL;
                }

                /* Ensure the lv is set up according to ->iop_size */
                lv->lv_niovecs = niovecs;

                /* The allocated data region lies beyond the iovec region */
                lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs);
        }
}
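
/*
 * Illustrative sizing example (hypothetical values, not from the original
 * source): an item reporting niovecs = 2 and nbytes = 100 via ->iop_size is
 * sized as:
 *
 *	nbytes += 2 * sizeof(uint64_t);			100 -> 116
 *	nbytes = round_up(116, sizeof(uint64_t));	116 -> 120
 *	buf_size = 120 + xlog_cil_iovec_space(2);
 *
 * leaving 16 bytes of slack so each region can be 64-bit aligned when the
 * item is formatted into the buffer.
 */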
/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space and vectors it will consume, and if it is a new item pin it as
 * well.
 */
static void
xfs_cil_prepare_item(
        struct xlog             *log,
        struct xfs_log_vec      *lv,
        struct xfs_log_vec      *old_lv,
        int                     *diff_len,
        int                     *diff_iovecs)
{
        /* Account for the new LV being passed in */
        if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
                *diff_len += lv->lv_bytes;
                *diff_iovecs += lv->lv_niovecs;
        }

        /*
         * If there is no old LV, this is the first time we've seen the item in
         * this CIL context and so we need to pin it. If we are replacing the
         * old_lv, then remove the space it accounts for and make it the shadow
         * buffer for later freeing. In both cases we are now switching to the
         * shadow buffer, so update the pointer to it appropriately.
         */
        if (!old_lv) {
                if (lv->lv_item->li_ops->iop_pin)
                        lv->lv_item->li_ops->iop_pin(lv->lv_item);
                lv->lv_item->li_lv_shadow = NULL;
        } else if (old_lv != lv) {
                ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

                *diff_len -= old_lv->lv_bytes;
                *diff_iovecs -= old_lv->lv_niovecs;
                lv->lv_item->li_lv_shadow = old_lv;
        }

        /* attach new log vector to log item */
        lv->lv_item->li_lv = lv;

        /*
         * If this is the first time the item is being committed to the
         * CIL, store the sequence number on the log item so we can
         * tell in future commits whether this is the first checkpoint
         * the item is being committed into.
         */
        if (!lv->lv_item->li_seq)
                lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}
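
/*
 * To summarise the cases above: a new item (no old_lv) is pinned and has its
 * shadow pointer cleared because the shadow is now the active lv; a
 * reformatted item (old_lv != lv) has its old buffer demoted to the shadow
 * slot for lazy freeing; an overwritten item (old_lv == lv) needs no further
 * accounting beyond the new length added at the top of the function.
 */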
/*
 * Format log item into a flat buffer.
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function takes the prepared log vectors attached to each log item, and
 * formats the changes into the log vector buffer. The buffer it uses is
 * dependent on the current state of the vector in the CIL - the shadow lv is
 * guaranteed to be large enough for the current modification, but we will only
 * use that if we can't reuse the existing lv. If we can't reuse the existing
 * lv, then simply swap it out for the shadow lv. We don't free it - that is
 * done lazily either by the next modification or the freeing of the log item.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer. Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of
 * the item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point to the
 * copied region inside the buffer we just allocated. This allows us to format
 * the regions into the iclog as though they are being formatted directly out
 * of the objects themselves.
 */
static void
xlog_cil_insert_format_items(
        struct xlog             *log,
        struct xfs_trans        *tp,
        int                     *diff_len,
        int                     *diff_iovecs)
{
        struct xfs_log_item     *lip;

        /* Bail out if we didn't find a log item. */
        if (list_empty(&tp->t_items)) {
                ASSERT(0);
                return;
        }

        list_for_each_entry(lip, &tp->t_items, li_trans) {
                struct xfs_log_vec *lv;
                struct xfs_log_vec *old_lv = NULL;
                struct xfs_log_vec *shadow;
                bool    ordered = false;

                /* Skip items which aren't dirty in this transaction. */
                if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
                        continue;

                /*
                 * The formatting size information is already attached to
                 * the shadow lv on the log item.
                 */
                shadow = lip->li_lv_shadow;
                if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
                        ordered = true;

                /* Skip items that do not have any vectors for writing */
                if (!shadow->lv_niovecs && !ordered)
                        continue;

                /* compare to existing item size */
                old_lv = lip->li_lv;
                if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
                        /* same or smaller, optimise common overwrite case */
                        lv = lip->li_lv;
                        lv->lv_next = NULL;

                        if (ordered)
                                goto insert;

                        /*
                         * set the item up as though it is a new insertion so
                         * that the space reservation accounting is correct.
                         */
                        *diff_iovecs -= lv->lv_niovecs;
                        *diff_len -= lv->lv_bytes;

                        /* Ensure the lv is set up according to ->iop_size */
                        lv->lv_niovecs = shadow->lv_niovecs;

                        /* reset the lv buffer information for new formatting */
                        lv->lv_buf_len = 0;
                        lv->lv_bytes = 0;
                        lv->lv_buf = (char *)lv +
                                        xlog_cil_iovec_space(lv->lv_niovecs);
                } else {
                        /* switch to shadow buffer! */
                        lv = shadow;
                        lv->lv_item = lip;
                        if (ordered) {
                                /* track as an ordered logvec */
                                ASSERT(lip->li_lv == NULL);
                                goto insert;
                        }
                }

                ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
                lip->li_ops->iop_format(lip, lv);
insert:
                xfs_cil_prepare_item(log, lv, old_lv, diff_len, diff_iovecs);
        }
}
/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
        struct xlog             *log,
        struct xfs_trans        *tp)
{
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_cil_ctx      *ctx = cil->xc_ctx;
        struct xfs_log_item     *lip;
        int                     len = 0;
        int                     diff_iovecs = 0;
        int                     iclog_space;
        int                     iovhdr_res = 0, split_res = 0, ctx_res = 0;

        ASSERT(tp);

        /*
         * We can do this safely because the context can't checkpoint until we
         * are done so it doesn't matter exactly how we update the CIL.
         */
        xlog_cil_insert_format_items(log, tp, &len, &diff_iovecs);

        spin_lock(&cil->xc_cil_lock);

        /* account for space used by new iovec headers */
        iovhdr_res = diff_iovecs * sizeof(xlog_op_header_t);
        len += iovhdr_res;
        ctx->nvecs += diff_iovecs;

        /* attach the transaction to the CIL if it has any busy extents */
        if (!list_empty(&tp->t_busy))
                list_splice_init(&tp->t_busy, &ctx->busy_extents);
        /*
         * Now transfer enough transaction reservation to the context ticket
         * for the checkpoint. The context ticket is special - the unit
         * reservation has to grow as well as the current reservation as we
         * steal from tickets so we can correctly determine the space used
         * during the transaction commit.
         */
        if (ctx->ticket->t_curr_res == 0) {
                ctx_res = ctx->ticket->t_unit_res;
                ctx->ticket->t_curr_res = ctx_res;
                tp->t_ticket->t_curr_res -= ctx_res;
        }

        /* do we need space for more log record headers? */
        iclog_space = log->l_iclog_size - log->l_iclog_hsize;
        if (len > 0 && (ctx->space_used / iclog_space !=
                                (ctx->space_used + len) / iclog_space)) {
                split_res = (len + iclog_space - 1) / iclog_space;
                /* need to take into account split region headers, too */
                split_res *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
                ctx->ticket->t_unit_res += split_res;
                ctx->ticket->t_curr_res += split_res;
                tp->t_ticket->t_curr_res -= split_res;
                ASSERT(tp->t_ticket->t_curr_res >= len);
        }
        tp->t_ticket->t_curr_res -= len;
        ctx->space_used += len;
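
        /*
         * Illustrative example of the split accounting above (hypothetical
         * sizes, not from the original source): with a 32KB iclog and a 512
         * byte header, iclog_space is 32256 bytes. A commit with len = 40000
         * that crosses an iclog boundary yields
         * split_res = (40000 + 32255) / 32256 = 2, i.e. we steal reservation
         * for two extra record headers plus their split region op headers.
         */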
        /*
         * If we've overrun the reservation, dump the tx details before we move
         * the log items. Shutdown is imminent...
         */
        if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
                xfs_warn(log->l_mp, "Transaction log reservation overrun:");
                xfs_warn(log->l_mp,
                         "  log items: %d bytes (iov hdrs: %d bytes)",
                         len, iovhdr_res);
                xfs_warn(log->l_mp, "  split region headers: %d bytes",
                         split_res);
                xfs_warn(log->l_mp, "  ctx ticket: %d bytes", ctx_res);
                xlog_print_trans(tp);
        }

        /*
         * Now (re-)position everything modified at the tail of the CIL.
         * We do this here so we only need to take the CIL lock once during
         * the transaction commit.
         */
        list_for_each_entry(lip, &tp->t_items, li_trans) {

                /* Skip items which aren't dirty in this transaction. */
                if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
                        continue;

                /*
                 * Only move the item if it isn't already at the tail. This is
                 * to prevent a transient list_empty() state when reinserting
                 * an item that is already the only item in the CIL.
                 */
                if (!list_is_last(&lip->li_cil, &cil->xc_cil))
                        list_move_tail(&lip->li_cil, &cil->xc_cil);
        }

        spin_unlock(&cil->xc_cil_lock);

        if (tp->t_ticket->t_curr_res < 0)
                xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
}
static void
xlog_cil_free_logvec(
        struct xfs_log_vec      *log_vector)
{
        struct xfs_log_vec      *lv;

        for (lv = log_vector; lv; ) {
                struct xfs_log_vec *next = lv->lv_next;
                kmem_free(lv);
                lv = next;
        }
}
static void
xlog_discard_endio_work(
        struct work_struct      *work)
{
        struct xfs_cil_ctx      *ctx =
                container_of(work, struct xfs_cil_ctx, discard_endio_work);
        struct xfs_mount        *mp = ctx->cil->xc_log->l_mp;

        xfs_extent_busy_clear(mp, &ctx->busy_extents, false);
        kmem_free(ctx);
}
/*
 * Queue up the actual completion to a thread to avoid IRQ-safe locking for
 * pagb_lock. Note that we need an unbounded workqueue, otherwise we might
 * get the execution delayed up to 30 seconds for weird reasons.
 */
static void
xlog_discard_endio(
        struct bio              *bio)
{
        struct xfs_cil_ctx      *ctx = bio->bi_private;

        INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work);
        queue_work(xfs_discard_wq, &ctx->discard_endio_work);
        bio_put(bio);
}
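
/*
 * Issue discards for all the busy extents attached to this CIL context.
 * __blkdev_issue_discard() chains the bios it builds, so attaching the endio
 * handler to the last returned bio below fires xlog_discard_endio() once the
 * whole chain completes; if no discard bio was issued at all, the completion
 * work is run directly.
 */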
static void
xlog_discard_busy_extents(
        struct xfs_mount        *mp,
        struct xfs_cil_ctx      *ctx)
{
        struct list_head        *list = &ctx->busy_extents;
        struct xfs_extent_busy  *busyp;
        struct bio              *bio = NULL;
        struct blk_plug         plug;
        int                     error = 0;

        ASSERT(mp->m_flags & XFS_MOUNT_DISCARD);

        blk_start_plug(&plug);
        list_for_each_entry(busyp, list, list) {
                trace_xfs_discard_extent(mp, busyp->agno, busyp->bno,
                                         busyp->length);

                error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
                                XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
                                XFS_FSB_TO_BB(mp, busyp->length),
                                GFP_NOFS, 0, &bio);
                if (error && error != -EOPNOTSUPP) {
                        xfs_info(mp,
         "discard failed for extent [0x%llx,%u], error %d",
                                 (unsigned long long)busyp->bno,
                                 busyp->length,
                                 error);
                        break;
                }
        }

        if (bio) {
                bio->bi_private = ctx;
                bio->bi_end_io = xlog_discard_endio;
                submit_bio(bio);
        } else {
                xlog_discard_endio_work(&ctx->discard_endio_work);
        }
        blk_finish_plug(&plug);
}
/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
        struct xfs_cil_ctx      *ctx)
{
        struct xfs_mount        *mp = ctx->cil->xc_log->l_mp;
        bool                    abort = XLOG_FORCED_SHUTDOWN(ctx->cil->xc_log);

        /*
         * If the I/O failed, we're aborting the commit and already shutdown.
         * Wake any commit waiters before aborting the log items so we don't
         * block async log pushers on callbacks. Async log pushers explicitly do
         * not wait on log force completion because they may be holding locks
         * required to unpin items.
         */
        if (abort) {
                spin_lock(&ctx->cil->xc_push_lock);
                wake_up_all(&ctx->cil->xc_commit_wait);
                spin_unlock(&ctx->cil->xc_push_lock);
        }

        xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
                                        ctx->start_lsn, abort);

        xfs_extent_busy_sort(&ctx->busy_extents);
        xfs_extent_busy_clear(mp, &ctx->busy_extents,
                              (mp->m_flags & XFS_MOUNT_DISCARD) && !abort);

        spin_lock(&ctx->cil->xc_push_lock);
        list_del(&ctx->committing);
        spin_unlock(&ctx->cil->xc_push_lock);

        xlog_cil_free_logvec(ctx->lv_chain);

        if (!list_empty(&ctx->busy_extents))
                xlog_discard_busy_extents(mp, ctx);
        else
                kmem_free(ctx);
}
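
/*
 * Run the committed contexts attached to an iclog's callback list at log I/O
 * completion; each context is deleted from the list and completed via
 * xlog_cil_committed().
 */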
void
xlog_cil_process_committed(
        struct list_head        *list)
{
        struct xfs_cil_ctx      *ctx;

        while ((ctx = list_first_entry_or_null(list,
                        struct xfs_cil_ctx, iclog_entry))) {
                list_del(&ctx->iclog_entry);
                xlog_cil_committed(ctx);
        }
}
/*
 * Push the Committed Item List to the log.
 *
 * If the current sequence is the same as xc_push_seq we need to do a flush. If
 * xc_push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * xc_push_seq is checked unlocked against the sequence number for a match.
 * Hence we can allow log forces to run racily and not issue pushes for the
 * same sequence twice. If we get a race between multiple pushes for the same
 * sequence they will block on the first one and then abort, hence avoiding
 * needless pushes.
 */
static void
xlog_cil_push_work(
        struct work_struct      *work)
{
        struct xfs_cil          *cil =
                container_of(work, struct xfs_cil, xc_push_work);
        struct xlog             *log = cil->xc_log;
        struct xfs_log_vec      *lv;
        struct xfs_cil_ctx      *ctx;
        struct xfs_cil_ctx      *new_ctx;
        struct xlog_in_core     *commit_iclog;
        struct xlog_ticket      *tic;
        int                     num_iovecs;
        int                     error = 0;
        struct xfs_trans_header thdr;
        struct xfs_log_iovec    lhdr;
        struct xfs_log_vec      lvhdr = { NULL };
        xfs_lsn_t               commit_lsn;
        xfs_lsn_t               push_seq;

        new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_NOFS);
        new_ctx->ticket = xlog_cil_ticket_alloc(log);

        down_write(&cil->xc_ctx_lock);
        ctx = cil->xc_ctx;

        spin_lock(&cil->xc_push_lock);
        push_seq = cil->xc_push_seq;
        ASSERT(push_seq <= ctx->sequence);
        /*
         * As we are about to switch to a new, empty CIL context, we no longer
         * need to throttle tasks on CIL space overruns. Wake any waiters that
         * the hard push throttle may have caught so they can start committing
         * to the new context. The ctx->xc_push_lock provides the serialisation
         * necessary for safely using the lockless waitqueue_active() check in
         * this context.
         */
        if (waitqueue_active(&cil->xc_push_wait))
                wake_up_all(&cil->xc_push_wait);

        /*
         * Check if we've anything to push. If there is nothing, then we don't
         * move on to a new sequence number and so we have to be able to push
         * this sequence again later.
         */
        if (list_empty(&cil->xc_cil)) {
                cil->xc_push_seq = 0;
                spin_unlock(&cil->xc_push_lock);
                goto out_skip;
        }
        /* check for a previously pushed sequence */
        if (push_seq < cil->xc_ctx->sequence) {
                spin_unlock(&cil->xc_push_lock);
                goto out_skip;
        }

        /*
         * We are now going to push this context, so add it to the committing
         * list before we do anything else. This ensures that anyone waiting on
         * this push can easily detect the difference between a "push in
         * progress" and "CIL is empty, nothing to do".
         *
         * IOWs, a wait loop can now check for:
         *	the current sequence not being found on the committing list;
         *	an empty CIL; and
         *	an unchanged sequence number
         * to detect a push that had nothing to do and therefore does not need
         * waiting on. If the CIL is not empty, we get put on the committing
         * list before emptying the CIL and bumping the sequence number. Hence
         * an empty CIL and an unchanged sequence number means we jumped out
         * above after doing nothing.
         *
         * Hence the waiter will either find the commit sequence on the
         * committing list or the sequence number will be unchanged and the CIL
         * still dirty. In that latter case, the push has not yet started, and
         * so the waiter will have to continue trying to check the CIL
         * committing list until it is found. In extreme cases of delay, the
         * sequence may fully commit between the attempts the wait makes to wait
         * on the commit sequence.
         */
        list_add(&ctx->committing, &cil->xc_committing);
        spin_unlock(&cil->xc_push_lock);
        /*
         * pull all the log vectors off the items in the CIL, and
         * remove the items from the CIL. We don't need the CIL lock
         * here because it's only needed on the transaction commit
         * side which is currently locked out by the flush lock.
         */
        lv = NULL;
        num_iovecs = 0;
        while (!list_empty(&cil->xc_cil)) {
                struct xfs_log_item     *item;

                item = list_first_entry(&cil->xc_cil,
                                        struct xfs_log_item, li_cil);
                list_del_init(&item->li_cil);
                if (!ctx->lv_chain)
                        ctx->lv_chain = item->li_lv;
                else
                        lv->lv_next = item->li_lv;
                lv = item->li_lv;
                item->li_lv = NULL;
                num_iovecs += lv->lv_niovecs;
        }
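
        /*
         * The lv chain built above preserves CIL insertion order, and
         * num_iovecs totals the iovecs in every vector so it can be recorded
         * in the checkpoint transaction header below.
         */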
        /*
         * initialise the new context and attach it to the CIL. Then attach
         * the current context to the CIL committing list so it can be found
         * during log forces to extract the commit lsn of the sequence that
         * needs to be forced.
         */
        INIT_LIST_HEAD(&new_ctx->committing);
        INIT_LIST_HEAD(&new_ctx->busy_extents);
        new_ctx->sequence = ctx->sequence + 1;
        new_ctx->cil = cil;
        cil->xc_ctx = new_ctx;
        /*
         * The switch is now done, so we can drop the context lock and move out
         * of a shared context. We can't just go straight to the commit record,
         * though - we need to synchronise with previous and future commits so
         * that the commit records are correctly ordered in the log to ensure
         * that we process items during log IO completion in the correct order.
         *
         * For example, if we get an EFI in one checkpoint and the EFD in the
         * next (e.g. due to log forces), we do not want the checkpoint with
         * the EFD to be committed before the checkpoint with the EFI. Hence
         * we must strictly order the commit records of the checkpoints so
         * that: a) the checkpoint callbacks are attached to the iclogs in the
         * correct order; and b) the checkpoints are replayed in correct order
         * in log recovery.
         *
         * Hence we need to add this context to the committing context list so
         * that higher sequences will wait for us to write out a commit record
         * before they do.
         *
         * xfs_log_force_seq requires us to mirror the new sequence into the cil
         * structure atomically with the addition of this sequence to the
         * committing list. This also ensures that we can do unlocked checks
         * against the current sequence in log forces without risking
         * dereferencing a freed context pointer.
         */
        spin_lock(&cil->xc_push_lock);
        cil->xc_current_sequence = new_ctx->sequence;
        spin_unlock(&cil->xc_push_lock);
        up_write(&cil->xc_ctx_lock);
        /*
         * Build a checkpoint transaction header and write it to the log to
         * begin the transaction. We need to account for the space used by the
         * transaction header here as it is not accounted for in xlog_write().
         *
         * The LSN we need to pass to the log items on transaction commit is
         * the LSN reported by the first log vector write. If we use the commit
         * record lsn then we can move the tail beyond the grant write head.
         */
        tic = ctx->ticket;
        thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
        thdr.th_type = XFS_TRANS_CHECKPOINT;
        thdr.th_tid = tic->t_tid;
        thdr.th_num_items = num_iovecs;
        lhdr.i_addr = &thdr;
        lhdr.i_len = sizeof(xfs_trans_header_t);
        lhdr.i_type = XLOG_REG_TYPE_TRANSHDR;
        tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t);

        lvhdr.lv_niovecs = 1;
        lvhdr.lv_iovecp = &lhdr;
        lvhdr.lv_next = ctx->lv_chain;

        error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0, true);
        if (error)
                goto out_abort_free_ticket;
        /*
         * now that we've written the checkpoint into the log, strictly
         * order the commit records so replay will get them in the right order.
         */
restart:
        spin_lock(&cil->xc_push_lock);
        list_for_each_entry(new_ctx, &cil->xc_committing, committing) {
                /*
                 * Avoid getting stuck in this loop because we were woken by the
                 * shutdown, but then went back to sleep once already in the
                 * shutdown state.
                 */
                if (XLOG_FORCED_SHUTDOWN(log)) {
                        spin_unlock(&cil->xc_push_lock);
                        goto out_abort_free_ticket;
                }

                /*
                 * Higher sequences will wait for this one so skip them.
                 * Don't wait for our own sequence, either.
                 */
                if (new_ctx->sequence >= ctx->sequence)
                        continue;
                if (!new_ctx->commit_lsn) {
                        /*
                         * It is still being pushed! Wait for the push to
                         * complete, then start again from the beginning.
                         */
                        xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
                        goto restart;
                }
        }
        spin_unlock(&cil->xc_push_lock);
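
        /*
         * At this point every earlier sequence on the committing list has a
         * commit record in the log, so our commit record can now be written
         * and the checkpoints will be replayed in sequence order.
         */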
        error = xlog_commit_record(log, tic, &commit_iclog, &commit_lsn);
        if (error)
                goto out_abort_free_ticket;

        xfs_log_ticket_ungrant(log, tic);

        spin_lock(&commit_iclog->ic_callback_lock);
        if (commit_iclog->ic_state == XLOG_STATE_IOERROR) {
                spin_unlock(&commit_iclog->ic_callback_lock);
                goto out_abort;
        }
        ASSERT_ALWAYS(commit_iclog->ic_state == XLOG_STATE_ACTIVE ||
                      commit_iclog->ic_state == XLOG_STATE_WANT_SYNC);
        list_add_tail(&ctx->iclog_entry, &commit_iclog->ic_callbacks);
        spin_unlock(&commit_iclog->ic_callback_lock);

        /*
         * now the checkpoint commit is complete and we've attached the
         * callbacks to the iclog we can assign the commit LSN to the context
         * and wake up anyone who is waiting for the commit to complete.
         */
        spin_lock(&cil->xc_push_lock);
        ctx->commit_lsn = commit_lsn;
        wake_up_all(&cil->xc_commit_wait);
        spin_unlock(&cil->xc_push_lock);

        /* release the hounds! */
        xfs_log_release_iclog(commit_iclog);
        return;
out_skip:
        up_write(&cil->xc_ctx_lock);
        xfs_log_ticket_put(new_ctx->ticket);
        kmem_free(new_ctx);
        return;

out_abort_free_ticket:
        xfs_log_ticket_ungrant(log, tic);
out_abort:
        ASSERT(XLOG_FORCED_SHUTDOWN(log));
        xlog_cil_committed(ctx);
}
/*
 * We need to push CIL every so often so we don't cache more than we can fit in
 * the log. The limit really is that a checkpoint can't be more than half the
 * log (the current checkpoint is not allowed to overwrite the previous
 * checkpoint), but commit latency and memory usage limit this to a smaller
 * size.
 */
static void
xlog_cil_push_background(
        struct xlog     *log) __releases(cil->xc_ctx_lock)
{
        struct xfs_cil  *cil = log->l_cilp;

        /*
         * The cil won't be empty because we are called while holding the
         * context lock so whatever we added to the CIL will still be there.
         */
        ASSERT(!list_empty(&cil->xc_cil));

        /*
         * Don't do a background push if we haven't used up all the
         * space available yet.
         */
        if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log)) {
                up_read(&cil->xc_ctx_lock);
                return;
        }

        spin_lock(&cil->xc_push_lock);
        if (cil->xc_push_seq < cil->xc_current_sequence) {
                cil->xc_push_seq = cil->xc_current_sequence;
                queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
        }

        /*
         * Drop the context lock now, we can't hold that if we need to sleep
         * because we are over the blocking threshold. The push_lock is still
         * held, so blocking threshold sleep/wakeup is still correctly
         * serialised here.
         */
        up_read(&cil->xc_ctx_lock);

        /*
         * If we are well over the space limit, throttle the work that is being
         * done until the push work on this context has begun. Enforce the hard
         * throttle on all transaction commits once it has been activated, even
         * if the committing transactions have resulted in the space usage
         * dipping back down under the hard limit.
         *
         * The ctx->xc_push_lock provides the serialisation necessary for safely
         * using the lockless waitqueue_active() check in this context.
         */
        if (cil->xc_ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log) ||
            waitqueue_active(&cil->xc_push_wait)) {
                trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
                ASSERT(cil->xc_ctx->space_used < log->l_logsize);
                xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
                return;
        }

        spin_unlock(&cil->xc_push_lock);
}
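
/*
 * In short, two thresholds are at work above: once space_used passes
 * XLOG_CIL_SPACE_LIMIT() the push work is queued in the background, and once
 * it passes XLOG_CIL_BLOCKING_SPACE_LIMIT() committers sleep on xc_push_wait
 * until the queued push work starts and wakes them from xlog_cil_push_work().
 */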
/*
 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
 * number that is passed. When it returns, the work will be queued for
 * @push_seq, but it won't be completed. The caller is expected to do any
 * waiting for push_seq to complete if it is required.
 */
static void
xlog_cil_push_now(
        struct xlog     *log,
        xfs_lsn_t       push_seq)
{
        struct xfs_cil  *cil = log->l_cilp;

        if (!cil)
                return;

        ASSERT(push_seq && push_seq <= cil->xc_current_sequence);

        /* start on any pending background push to minimise wait time on it */
        flush_work(&cil->xc_push_work);

        /*
         * If the CIL is empty or we've already pushed the sequence then
         * there's no work we need to do.
         */
        spin_lock(&cil->xc_push_lock);
        if (list_empty(&cil->xc_cil) || push_seq <= cil->xc_push_seq) {
                spin_unlock(&cil->xc_push_lock);
                return;
        }

        cil->xc_push_seq = push_seq;
        queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
        spin_unlock(&cil->xc_push_lock);
}
bool
xlog_cil_empty(
        struct xlog     *log)
{
        struct xfs_cil  *cil = log->l_cilp;
        bool            empty = false;

        spin_lock(&cil->xc_push_lock);
        if (list_empty(&cil->xc_cil))
                empty = true;
        spin_unlock(&cil->xc_push_lock);
        return empty;
}
/*
 * Commit a transaction with the given vector to the Committed Item List.
 *
 * To do this, we need to format the item, pin it in memory if required and
 * account for the space used by the transaction. Once we have done that we
 * need to release the unused reservation for the transaction, attach the
 * transaction to the checkpoint context so we carry the busy extents through
 * to checkpoint completion, and then unlock all the items in the transaction.
 *
 * Called with the context lock already held in read mode to lock out
 * background commit, returns without it held once background commits are
 * allowed again.
 */
void
xlog_cil_commit(
        struct xlog             *log,
        struct xfs_trans        *tp,
        xfs_csn_t               *commit_seq,
        bool                    regrant)
{
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_log_item     *lip, *next;

        /*
         * Do all necessary memory allocation before we lock the CIL.
         * This ensures the allocation does not deadlock with a CIL
         * push in memory reclaim (e.g. from kswapd).
         */
        xlog_cil_alloc_shadow_bufs(log, tp);

        /* lock out background commit */
        down_read(&cil->xc_ctx_lock);

        xlog_cil_insert_items(log, tp);

        if (regrant && !XLOG_FORCED_SHUTDOWN(log))
                xfs_log_ticket_regrant(log, tp->t_ticket);
        else
                xfs_log_ticket_ungrant(log, tp->t_ticket);
        tp->t_ticket = NULL;
        xfs_trans_unreserve_and_mod_sb(tp);

        /*
         * Once all the items of the transaction have been copied to the CIL,
         * the items can be unlocked and possibly freed.
         *
         * This needs to be done before we drop the CIL context lock because we
         * have to update state in the log items and unlock them before they go
         * to disk. If we don't, then the CIL checkpoint can race with us and
         * we can run checkpoint completion before we've updated and unlocked
         * the log items. This affects (at least) processing of stale buffers,
         * inodes and EFIs.
         */
        trace_xfs_trans_commit_items(tp, _RET_IP_);
        list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
                xfs_trans_del_item(lip);
                if (lip->li_ops->iop_committing)
                        lip->li_ops->iop_committing(lip, cil->xc_ctx->sequence);
        }
        if (commit_seq)
                *commit_seq = cil->xc_ctx->sequence;

        /* xlog_cil_push_background() releases cil->xc_ctx_lock */
        xlog_cil_push_background(log);
}
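
/*
 * The commit path above is, in outline: allocate shadow buffers outside the
 * CIL context lock, take the lock shared, format and insert the items,
 * regrant or ungrant the transaction's log ticket, unlock the log items via
 * ->iop_committing, and finally let xlog_cil_push_background() decide whether
 * the CIL is full enough to push (it also drops the context lock).
 */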
/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence
 * number given. Hence the only time we will trigger a push here is
 * if the push sequence is the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
 */
xfs_lsn_t
xlog_cil_force_seq(
        struct xlog     *log,
        xfs_csn_t       sequence)
{
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_cil_ctx      *ctx;
        xfs_lsn_t               commit_lsn = NULLCOMMITLSN;

        ASSERT(sequence <= cil->xc_current_sequence);

        /*
         * check to see if we need to force out the current context.
         * xlog_cil_push() handles racing pushes for the same sequence,
         * so no need to deal with it here.
         */
restart:
        xlog_cil_push_now(log, sequence);
        /*
         * See if we can find a previous sequence still committing.
         * We need to wait for all previous sequence commits to complete
         * before allowing the force of push_seq to go ahead. Hence block
         * on commits for those as well.
         */
        spin_lock(&cil->xc_push_lock);
        list_for_each_entry(ctx, &cil->xc_committing, committing) {
                /*
                 * Avoid getting stuck in this loop because we were woken by the
                 * shutdown, but then went back to sleep once already in the
                 * shutdown state.
                 */
                if (XLOG_FORCED_SHUTDOWN(log))
                        goto out_shutdown;
                if (ctx->sequence > sequence)
                        continue;
                if (!ctx->commit_lsn) {
                        /*
                         * It is still being pushed! Wait for the push to
                         * complete, then start again from the beginning.
                         */
                        xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
                        goto restart;
                }
                if (ctx->sequence != sequence)
                        continue;
                /* found it! */
                commit_lsn = ctx->commit_lsn;
        }
        /*
         * The call to xlog_cil_push_now() executes the push in the background.
         * Hence by the time we have got here our sequence may not have been
         * pushed yet. This is true if the current sequence still matches the
         * push sequence after the above wait loop and the CIL still contains
         * dirty objects. This is guaranteed by the push code first adding the
         * context to the committing list before emptying the CIL.
         *
         * Hence if we don't find the context in the committing list and the
         * current sequence number is unchanged then the CIL contents are
         * significant. If the CIL is empty, it means there was nothing to push
         * and that means there is nothing to wait for. If the CIL is not empty,
         * it means we haven't yet started the push, because if it had started
         * we would have found the context on the committing list.
         */
        if (sequence == cil->xc_current_sequence &&
            !list_empty(&cil->xc_cil)) {
                spin_unlock(&cil->xc_push_lock);
                goto restart;
        }

        spin_unlock(&cil->xc_push_lock);
        return commit_lsn;

        /*
         * We detected a shutdown in progress. We need to trigger the log force
         * to pass through its iclog state machine error handling, even though
         * we are already in a shutdown state. Hence we can't return
         * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
         * LSN is already stable), so we return a zero LSN instead.
         */
out_shutdown:
        spin_unlock(&cil->xc_push_lock);
        return 0;
}
/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
bool
xfs_log_item_in_current_chkpt(
        struct xfs_log_item     *lip)
{
        struct xfs_cil  *cil = lip->li_mountp->m_log->l_cilp;

        if (list_empty(&lip->li_cil))
                return false;

        /*
         * li_seq is written on the first commit of a log item to record the
         * first checkpoint it is written to. Hence if it is different to the
         * current sequence, we're in a new checkpoint.
         */
        return lip->li_seq == READ_ONCE(cil->xc_current_sequence);
}
/*
 * Perform initial CIL structure initialisation.
 */
int
xlog_cil_init(
        struct xlog     *log)
{
        struct xfs_cil          *cil;
        struct xfs_cil_ctx      *ctx;

        cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL);
        if (!cil)
                return -ENOMEM;

        ctx = kmem_zalloc(sizeof(*ctx), KM_MAYFAIL);
        if (!ctx) {
                kmem_free(cil);
                return -ENOMEM;
        }

        INIT_WORK(&cil->xc_push_work, xlog_cil_push_work);
        INIT_LIST_HEAD(&cil->xc_cil);
        INIT_LIST_HEAD(&cil->xc_committing);
        spin_lock_init(&cil->xc_cil_lock);
        spin_lock_init(&cil->xc_push_lock);
        init_waitqueue_head(&cil->xc_push_wait);
        init_rwsem(&cil->xc_ctx_lock);
        init_waitqueue_head(&cil->xc_commit_wait);

        INIT_LIST_HEAD(&ctx->committing);
        INIT_LIST_HEAD(&ctx->busy_extents);
        ctx->sequence = 1;
        ctx->cil = cil;
        cil->xc_ctx = ctx;
        cil->xc_current_sequence = ctx->sequence;

        cil->xc_log = log;
        log->l_cilp = cil;
        return 0;
}
void
xlog_cil_destroy(
        struct xlog     *log)
{
        if (log->l_cilp->xc_ctx) {
                if (log->l_cilp->xc_ctx->ticket)
                        xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
                kmem_free(log->l_cilp->xc_ctx);
        }

        ASSERT(list_empty(&log->l_cilp->xc_cil));
        kmem_free(log->l_cilp);
}