// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_buf_item.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"

struct kmem_cache	*xfs_buf_item_cache;

static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_buf_log_item, bli_item);
}

/* Is this log iovec plausibly large enough to contain the buffer log format? */
bool
xfs_buf_log_check_iovec(
	struct xfs_log_iovec		*iovec)
{
	struct xfs_buf_log_format	*blfp = iovec->i_addr;
	char				*bmp_end;
	char				*item_end;

	if (offsetof(struct xfs_buf_log_format, blf_data_map) > iovec->i_len)
		return false;

	item_end = (char *)iovec->i_addr + iovec->i_len;
	bmp_end = (char *)&blfp->blf_data_map[blfp->blf_map_size];
	return bmp_end <= item_end;
}

static inline int
xfs_buf_log_format_size(
	struct xfs_buf_log_format *blfp)
{
	return offsetof(struct xfs_buf_log_format, blf_data_map) +
			(blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
}
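
/*
 * Worked example (illustrative, not from the original source): with a
 * one-word dirty bitmap (blf_map_size = 1), the logged format structure
 * is the header up to blf_data_map plus a single 4 byte bitmap word.
 */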

static inline bool
xfs_buf_item_straddle(
	struct xfs_buf		*bp,
	uint			offset,
	int			first_bit,
	int			nbits)
{
	void			*first, *last;

	first = xfs_buf_offset(bp, offset + (first_bit << XFS_BLF_SHIFT));
	last = xfs_buf_offset(bp,
			offset + ((first_bit + nbits) << XFS_BLF_SHIFT));

	if (last - first != nbits * XFS_BLF_CHUNK)
		return true;
	return false;
}
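
/*
 * Example (illustrative): an unmapped multi-page buffer keeps its pages
 * at unrelated addresses, so a run of chunks that crosses a page boundary
 * yields xfs_buf_offset() addresses whose distance differs from
 * nbits * XFS_BLF_CHUNK and must be logged as separate regions.
 */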

/*
 * Return the number of log iovecs and space needed to log the given buf log
 * item segment.
 *
 * It calculates this as 1 iovec for the buf log format structure and 1 for each
 * stretch of non-contiguous chunks to be logged.  Contiguous chunks are logged
 * in a single iovec.
 */
STATIC void
xfs_buf_item_size_segment(
	struct xfs_buf_log_item		*bip,
	struct xfs_buf_log_format	*blfp,
	uint				offset,
	int				*nvecs,
	int				*nbytes)
{
	struct xfs_buf			*bp = bip->bli_buf;
	int				first_bit;
	int				nbits;
	int				next_bit;
	int				last_bit;

	first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (first_bit == -1)
		return;

	(*nvecs)++;
	*nbytes += xfs_buf_log_format_size(blfp);

	do {
		nbits = xfs_contig_bits(blfp->blf_data_map,
					blfp->blf_map_size, first_bit);
		ASSERT(nbits > 0);

		/*
		 * Straddling a page is rare because we don't log contiguous
		 * chunks of unmapped buffers anywhere.
		 */
		if (nbits > 1 &&
		    xfs_buf_item_straddle(bp, offset, first_bit, nbits))
			goto slow_scan;

		(*nvecs)++;
		*nbytes += nbits * XFS_BLF_CHUNK;

		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					 (uint)first_bit + nbits + 1);
	} while (first_bit != -1);

	return;

slow_scan:
	/* Count the first bit we jumped out of the above loop from */
	(*nvecs)++;
	*nbytes += XFS_BLF_CHUNK;
	last_bit = first_bit;
	while (last_bit != -1) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					last_bit + 1);
		/*
		 * If we run out of bits, leave the loop,
		 * else if we find a new set of bits bump the number of vecs,
		 * else keep scanning the current set of bits.
		 */
		if (next_bit == -1) {
			break;
		} else if (next_bit != last_bit + 1 ||
			   xfs_buf_item_straddle(bp, offset, first_bit, nbits)) {
			last_bit = next_bit;
			first_bit = next_bit;
			(*nvecs)++;
			nbits = 1;
		} else {
			last_bit++;
			nbits++;
		}
		*nbytes += XFS_BLF_CHUNK;
	}
}
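
/*
 * Worked example (illustrative, not from the original source): with
 * XFS_BLF_CHUNK = 128 and a dirty bitmap of 0b1100111 (bits 0-2 and 5-6
 * set), the fast path above counts the format iovec plus two chunk
 * iovecs: bits 0-2 form one contiguous run (384 bytes) and bits 5-6
 * another (256 bytes), so *nvecs grows by 3 and *nbytes by the format
 * size plus 640 bytes.
 */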

/*
 * Return the number of log iovecs and space needed to log the given buf log
 * item.
 *
 * Discontiguous buffers need a format structure per region that is being
 * logged. This makes the changes in the buffer appear to log recovery as though
 * they came from separate buffers, just like would occur if multiple buffers
 * were used instead of a single discontiguous buffer. This enables
 * discontiguous buffers to be in-memory constructs, completely transparent to
 * what ends up on disk.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
 * format structures. If the item has previously been logged and has dirty
 * regions, we do not relog them in stale buffers. This has the effect of
 * reducing the size of the relogged item by the amount of dirty data tracked
 * by the log item. This can result in the committing transaction reducing the
 * amount of space being consumed by the CIL.
 */
STATIC void
xfs_buf_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	int			i;
	int			bytes;
	uint			offset = 0;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log is the buf log
		 * format structure with the cancel flag in it as we are never
		 * going to replay the changes tracked in the log item.
		 */
		trace_xfs_buf_item_size_stale(bip);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		*nvecs += bip->bli_format_count;
		for (i = 0; i < bip->bli_format_count; i++) {
			*nbytes += xfs_buf_log_format_size(&bip->bli_formats[i]);
		}
		return;
	}

	ASSERT(bip->bli_flags & XFS_BLI_LOGGED);

	if (bip->bli_flags & XFS_BLI_ORDERED) {
		/*
		 * The buffer has been logged just to order it. It is not being
		 * included in the transaction commit, so no vectors are used at
		 * all.
		 */
		trace_xfs_buf_item_size_ordered(bip);
		*nvecs = XFS_LOG_VEC_ORDERED;
		return;
	}

	/*
	 * The vector count is based on the number of buffer vectors we have
	 * dirty bits in. This will only be greater than one when we have a
	 * compound buffer with more than one segment dirty. Hence for compound
	 * buffers we need to track which segment the dirty bits correspond to,
	 * and when we move from one segment to the next increment the vector
	 * count for the extra buf log format structure that will need to be
	 * written.
	 */
	bytes = 0;
	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_size_segment(bip, &bip->bli_formats[i], offset,
					  nvecs, &bytes);
		offset += BBTOB(bp->b_maps[i].bm_len);
	}

	/*
	 * Round up the buffer size required to minimise the number of memory
	 * allocations that need to be done as this item grows when relogged by
	 * repeated modifications.
	 */
	*nbytes = round_up(bytes, 512);
	trace_xfs_buf_item_size(bip);
}

static inline void
xfs_buf_item_copy_iovec(
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	struct xfs_buf		*bp,
	uint			offset,
	int			first_bit,
	uint			nbits)
{
	offset += first_bit * XFS_BLF_CHUNK;
	xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BCHUNK,
			xfs_buf_offset(bp, offset),
			nbits * XFS_BLF_CHUNK);
}
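
/*
 * For example (illustrative): with first_bit = 5 and nbits = 2, the copy
 * above logs 2 * XFS_BLF_CHUNK = 256 bytes starting 640 bytes into the
 * segment at @offset.
 */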

static void
xfs_buf_item_format_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	uint			offset,
	struct xfs_buf_log_format *blfp)
{
	struct xfs_buf		*bp = bip->bli_buf;
	uint			base_size;
	int			first_bit;
	int			last_bit;
	int			next_bit;
	uint			nbits;

	/* copy the flags across from the base format item */
	blfp->blf_flags = bip->__bli_format.blf_flags;

	/*
	 * Base size is the actual size of the ondisk structure - it reflects
	 * the actual size of the dirty bitmap rather than the size of the in
	 * memory structure.
	 */
	base_size = xfs_buf_log_format_size(blfp);

	first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
		/*
		 * If the map is not dirty in the transaction, mark
		 * the size as zero and do not advance the vector pointer.
		 */
		return;
	}

	blfp = xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BFORMAT, blfp, base_size);
	blfp->blf_size = 1;

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_format_stale(bip);
		ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
		return;
	}

	/*
	 * Fill in an iovec for each set of contiguous chunks.
	 */
	do {
		ASSERT(first_bit >= 0);
		nbits = xfs_contig_bits(blfp->blf_data_map,
					blfp->blf_map_size, first_bit);
		ASSERT(nbits > 0);

		/*
		 * Straddling a page is rare because we don't log contiguous
		 * chunks of unmapped buffers anywhere.
		 */
		if (nbits > 1 &&
		    xfs_buf_item_straddle(bp, offset, first_bit, nbits))
			goto slow_scan;

		xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
					first_bit, nbits);
		blfp->blf_size++;

		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					 (uint)first_bit + nbits + 1);
	} while (first_bit != -1);

	return;

slow_scan:
	ASSERT(bp->b_addr == NULL);
	last_bit = first_bit;
	nbits = 1;
	for (;;) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					(uint)last_bit + 1);
		/*
		 * If we run out of bits fill in the last iovec and get out of
		 * the loop.  Else if we start a new set of bits then fill in
		 * the iovec for the series we were looking at and start
		 * counting the bits in the new one.  Else we're still in the
		 * same set of bits so just keep counting and scanning.
		 */
		if (next_bit == -1) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			break;
		} else if (next_bit != last_bit + 1 ||
			   xfs_buf_item_straddle(bp, offset, first_bit, nbits)) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else {
			last_bit++;
			nbits++;
		}
	}
}
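
/*
 * Example (illustrative): if a segment has two separated dirty runs, the
 * segment logs three regions in total: the buf log format header
 * (blf_size = 1 above) plus one XLOG_REG_TYPE_BCHUNK region per run,
 * leaving blf_size = 3.
 */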

/*
 * This is called to fill in the vector of log iovecs for the
 * given log buf item.  It fills the first entry with a buf log
 * format structure, and the rest point to contiguous chunks
 * within the buffer.
 */
STATIC void
xfs_buf_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	struct xfs_log_iovec	*vecp = NULL;
	uint			offset = 0;
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));
	ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
	       (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF
	        && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
	ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	/*
	 * If it is an inode buffer, transfer the in-memory state to the
	 * format flags and clear the in-memory state.
	 *
	 * For buffer based inode allocation, we do not transfer
	 * this state if the inode buffer allocation has not yet been committed
	 * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
	 * correct replay of the inode allocation.
	 *
	 * For icreate item based inode allocation, the buffers aren't written
	 * to the journal during allocation, and hence we should always tag the
	 * buffer as an inode buffer so that the correct unlinked list replay
	 * occurs during recovery.
	 */
	if (bip->bli_flags & XFS_BLI_INODE_BUF) {
		if (xfs_has_v3inodes(lip->li_log->l_mp) ||
		    !((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
		      xfs_log_item_in_current_chkpt(lip)))
			bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
		bip->bli_flags &= ~XFS_BLI_INODE_BUF;
	}

	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_format_segment(bip, lv, &vecp, offset,
					    &bip->bli_formats[i]);
		offset += BBTOB(bp->b_maps[i].bm_len);
	}

	/*
	 * Check to make sure everything is consistent.
	 */
	trace_xfs_buf_item_format(bip);
}

/*
 * This is called to pin the buffer associated with the buf log item in memory
 * so it cannot be written out.
 *
 * We also always take a reference to the buffer log item here so that the bli
 * is held while the item is pinned in memory. This means that we can
 * unconditionally drop the reference count a transaction holds when the
 * transaction is completed.
 */
STATIC void
xfs_buf_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_pin(bip);

	atomic_inc(&bip->bli_refcount);
	atomic_inc(&bip->bli_buf->b_pin_count);
}

/*
 * This is called to unpin the buffer associated with the buf log item which
 * was previously pinned with a call to xfs_buf_item_pin().
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	int			stale = bip->bli_flags & XFS_BLI_STALE;
	int			freed;

	ASSERT(bp->b_log_item == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	/*
	 * Drop the bli ref associated with the pin and grab the hold required
	 * for the I/O simulation failure in the abort case. We have to do this
	 * before the pin count drops because the AIL doesn't acquire a bli
	 * reference. Therefore if the refcount drops to zero, the bli could
	 * still be AIL resident and the buffer submitted for I/O (and freed on
	 * completion) at any point before we return. This can be removed once
	 * the AIL properly holds a reference on the bli.
	 */
	freed = atomic_dec_and_test(&bip->bli_refcount);
	if (freed && !stale && remove)
		xfs_buf_hold(bp);
	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	/* nothing to do but drop the pin count if the bli is active */
	if (!freed)
		return;

	if (stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(bp->b_flags & XBF_STALE);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		ASSERT(list_empty(&lip->li_trans));
		ASSERT(!bp->b_transp);

		trace_xfs_buf_item_unpin_stale(bip);

		/*
		 * If we get called here because of an IO error, we may or may
		 * not have the item on the AIL. xfs_trans_ail_delete() will
		 * take care of that situation. xfs_trans_ail_delete() drops
		 * the AIL lock.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_item_done(bp);
			xfs_buf_inode_iodone(bp);
			ASSERT(list_empty(&bp->b_li_list));
		} else {
			xfs_trans_ail_delete(lip, SHUTDOWN_LOG_IO_ERROR);
			xfs_buf_item_relse(bp);
			ASSERT(bp->b_log_item == NULL);
		}
		xfs_buf_relse(bp);
	} else if (remove) {
		/*
		 * The buffer must be locked and held by the caller to simulate
		 * an async I/O failure. We acquired the hold for this case
		 * before the buffer was unpinned.
		 */
		xfs_buf_lock(bp);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_ioend_fail(bp);
	}
}

STATIC uint
xfs_buf_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			rval = XFS_ITEM_SUCCESS;

	if (xfs_buf_ispinned(bp))
		return XFS_ITEM_PINNED;
	if (!xfs_buf_trylock(bp)) {
		/*
		 * If we have just raced with a buffer being pinned and it has
		 * been marked stale, we could end up stalling until someone else
		 * issues a log force to unpin the stale buffer. Check for the
		 * race condition here so xfsaild recognizes the buffer is pinned
		 * and queues a log force to move it along.
		 */
		if (xfs_buf_ispinned(bp))
			return XFS_ITEM_PINNED;
		return XFS_ITEM_LOCKED;
	}

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_push(bip);

	/* has a previous flush failed due to IO errors? */
	if (bp->b_flags & XBF_WRITE_FAIL) {
		xfs_buf_alert_ratelimited(bp, "XFS: Failing async write",
	    "Failing async write on buffer block 0x%llx. Retrying async write.",
					  (long long)xfs_buf_daddr(bp));
	}

	if (!xfs_buf_delwri_queue(bp, buffer_list))
		rval = XFS_ITEM_FLUSHING;
	xfs_buf_unlock(bp);
	return rval;
}

/*
 * Drop the buffer log item refcount and take appropriate action. This helper
 * determines whether the bli must be freed or not, since a decrement to zero
 * does not necessarily mean the bli is unused.
 *
 * Return true if the bli is freed, false otherwise.
 */
bool
xfs_buf_item_put(
	struct xfs_buf_log_item	*bip)
{
	struct xfs_log_item	*lip = &bip->bli_item;
	bool			aborted;
	bool			dirty;

	/* drop the bli ref and return if it wasn't the last one */
	if (!atomic_dec_and_test(&bip->bli_refcount))
		return false;

	/*
	 * We dropped the last ref and must free the item if clean or aborted.
	 * If the bli is dirty and non-aborted, the buffer was clean in the
	 * transaction but still awaiting writeback from previous changes. In
	 * that case, the bli is freed on buffer writeback completion.
	 */
	aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
			xlog_is_shutdown(lip->li_log);
	dirty = bip->bli_flags & XFS_BLI_DIRTY;
	if (dirty && !aborted)
		return false;

	/*
	 * The bli is aborted or clean. An aborted item may be in the AIL
	 * regardless of dirty state. For example, consider an aborted
	 * transaction that invalidated a dirty bli and cleared the dirty
	 * state.
	 */
	if (aborted)
		xfs_trans_ail_delete(lip, 0);
	xfs_buf_item_relse(bip->bli_buf);
	return true;
}

/*
 * Release the buffer associated with the buf log item. If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.
 *
 * This call ignores the recursion count. It is only called when the buffer
 * should REALLY be unlocked, regardless of the recursion count.
 *
 * We unconditionally drop the transaction's reference to the log item. If the
 * item was logged, then another reference was taken when it was pinned, so we
 * can safely drop the transaction reference now. This also allows us to avoid
 * potential races with the unpin code freeing the bli by not referencing the
 * bli after we've dropped the reference count.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 * if necessary but do not unlock the buffer. This is for support of
 * xfs_trans_bhold(). Make sure the XFS_BLI_HOLD field is cleared if we don't
 * free the item.
 */
STATIC void
xfs_buf_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	bool			released;
	bool			hold = bip->bli_flags & XFS_BLI_HOLD;
	bool			stale = bip->bli_flags & XFS_BLI_STALE;
#if defined(DEBUG) || defined(XFS_WARN)
	bool			ordered = bip->bli_flags & XFS_BLI_ORDERED;
	bool			dirty = bip->bli_flags & XFS_BLI_DIRTY;
	bool			aborted = test_bit(XFS_LI_ABORTED,
						   &lip->li_flags);
#endif

	trace_xfs_buf_item_release(bip);

	/*
	 * The bli dirty state should match whether the blf has logged segments
	 * except for ordered buffers, where only the bli should be dirty.
	 */
	ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) ||
	       (ordered && dirty && !xfs_buf_item_dirty_format(bip)));
	ASSERT(!stale || (bip->__bli_format.blf_flags & XFS_BLF_CANCEL));

	/*
	 * Clear the buffer's association with this transaction and
	 * per-transaction state from the bli, which has been copied above.
	 */
	bp->b_transp = NULL;
	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);

	/*
	 * Unref the item and unlock the buffer unless held or stale. Stale
	 * buffers remain locked until final unpin unless the bli is freed by
	 * the unref call. The latter implies shutdown because buffer
	 * invalidation dirties the bli and transaction.
	 */
	released = xfs_buf_item_put(bip);
	if (hold || (stale && !released))
		return;
	ASSERT(!stale || aborted);
	xfs_buf_relse(bp);
}

STATIC void
xfs_buf_item_committing(
	struct xfs_log_item	*lip,
	xfs_csn_t		seq)
{
	return xfs_buf_item_release(lip);
}

/*
 * This is called to find out where the oldest active copy of the
 * buf log item in the on disk log resides now that the last log
 * write of it completed at the given lsn.
 * We always re-log all the dirty data in a buffer, so usually the
 * latest copy in the on disk log is the only one that matters.  For
 * those cases we simply return the given lsn.
 *
 * The one exception to this is for buffers full of newly allocated
 * inodes.  These buffers are only relogged with the XFS_BLI_INODE_BUF
 * flag set, indicating that only the di_next_unlinked fields from the
 * inodes in the buffers will be replayed during recovery.  If the
 * original newly allocated inode images have not yet been flushed
 * when the buffer is so relogged, then we need to make sure that we
 * keep the old images in the 'active' portion of the log.  We do this
 * by returning the original lsn of that transaction here rather than
 * the current one.
 */
STATIC xfs_lsn_t
xfs_buf_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	trace_xfs_buf_item_committed(bip);

	if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
		return lip->li_lsn;
	return lsn;
}

static const struct xfs_item_ops xfs_buf_item_ops = {
	.iop_size	= xfs_buf_item_size,
	.iop_format	= xfs_buf_item_format,
	.iop_pin	= xfs_buf_item_pin,
	.iop_unpin	= xfs_buf_item_unpin,
	.iop_release	= xfs_buf_item_release,
	.iop_committing	= xfs_buf_item_committing,
	.iop_committed	= xfs_buf_item_committed,
	.iop_push	= xfs_buf_item_push,
};

STATIC void
xfs_buf_item_get_format(
	struct xfs_buf_log_item	*bip,
	int			count)
{
	ASSERT(bip->bli_formats == NULL);
	bip->bli_format_count = count;

	if (count == 1) {
		bip->bli_formats = &bip->__bli_format;
		return;
	}

	bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
				       0);
}

STATIC void
xfs_buf_item_free_format(
	struct xfs_buf_log_item	*bip)
{
	if (bip->bli_formats != &bip->__bli_format) {
		kmem_free(bip->bli_formats);
		bip->bli_formats = NULL;
	}
}

/*
 * Allocate a new buf log item to go with the given buffer.
 * Set the buffer's b_log_item field to point to the new
 * buf log item.
 */
int
xfs_buf_item_init(
	struct xfs_buf		*bp,
	struct xfs_mount	*mp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	int			chunks;
	int			map_size;
	int			i;

	/*
	 * Check to see if there is already a buf log item for
	 * this buffer. If we do already have one, there is
	 * nothing to do here so return.
	 */
	ASSERT(bp->b_mount == mp);
	if (bip) {
		ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
		ASSERT(!bp->b_transp);
		ASSERT(bip->bli_buf == bp);
		return 0;
	}

	bip = kmem_cache_zalloc(xfs_buf_item_cache, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
	bip->bli_buf = bp;

	/*
	 * chunks is the number of XFS_BLF_CHUNK size pieces the buffer
	 * can be divided into. Make sure not to truncate any pieces.
	 * map_size is the size of the bitmap needed to describe the
	 * chunks of the buffer.
	 *
	 * Discontiguous buffer support follows the layout of the underlying
	 * buffer. This makes the implementation as simple as possible.
	 */
	xfs_buf_item_get_format(bip, bp->b_map_count);

	for (i = 0; i < bip->bli_format_count; i++) {
		chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
				      XFS_BLF_CHUNK);
		map_size = DIV_ROUND_UP(chunks, NBWORD);

		if (map_size > XFS_BLF_DATAMAP_SIZE) {
			kmem_cache_free(xfs_buf_item_cache, bip);
			xfs_err(mp,
	"buffer item dirty bitmap (%u uints) too small to reflect %u bytes!",
					map_size,
					BBTOB(bp->b_maps[i].bm_len));
			return -EFSCORRUPTED;
		}

		bip->bli_formats[i].blf_type = XFS_LI_BUF;
		bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
		bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
		bip->bli_formats[i].blf_map_size = map_size;
	}

	bp->b_log_item = bip;
	xfs_buf_hold(bp);
	return 0;
}
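
/*
 * Sizing example (illustrative, not from the original source): a
 * single-segment 4096 byte buffer with XFS_BLF_CHUNK = 128 divides into
 * 32 chunks, and with NBWORD = 32 bits per word the dirty bitmap needs
 * DIV_ROUND_UP(32, 32) = 1 word.
 */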

/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
static void
xfs_buf_item_log_segment(
	uint			first,
	uint			last,
	uint			*map)
{
	uint			first_bit;
	uint			last_bit;
	uint			bits_to_set;
	uint			bits_set;
	uint			word_num;
	uint			*wordp;
	uint			bit;
	uint			end_bit;
	uint			mask;

	ASSERT(first < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);
	ASSERT(last < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);

	/*
	 * Convert byte offsets to bit numbers.
	 */
	first_bit = first >> XFS_BLF_SHIFT;
	last_bit = last >> XFS_BLF_SHIFT;

	/*
	 * Calculate the total number of bits to be set.
	 */
	bits_to_set = last_bit - first_bit + 1;

	/*
	 * Get a pointer to the first word in the bitmap
	 * to set a bit in.
	 */
	word_num = first_bit >> BIT_TO_WORD_SHIFT;
	wordp = &map[word_num];

	/*
	 * Calculate the starting bit in the first word.
	 */
	bit = first_bit & (uint)(NBWORD - 1);

	/*
	 * First set any bits in the first word of our range.
	 * If it starts at bit 0 of the word, it will be
	 * set below rather than here.  That is what the variable
	 * bit tells us. The variable bits_set tracks the number
	 * of bits that have been set so far.  End_bit is the number
	 * of the last bit to be set in this word plus one.
	 */
	if (bit) {
		end_bit = min(bit + bits_to_set, (uint)NBWORD);
		mask = ((1U << (end_bit - bit)) - 1) << bit;
		*wordp |= mask;
		wordp++;
		bits_set = end_bit - bit;
	} else {
		bits_set = 0;
	}

	/*
	 * Now set bits a whole word at a time that are between
	 * first_bit and last_bit.
	 */
	while ((bits_to_set - bits_set) >= NBWORD) {
		*wordp = 0xffffffff;
		bits_set += NBWORD;
		wordp++;
	}

	/*
	 * Finally, set any bits left to be set in one last partial word.
	 */
	end_bit = bits_to_set - bits_set;
	if (end_bit) {
		mask = (1U << end_bit) - 1;
		*wordp |= mask;
	}
}
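
/*
 * Worked example (illustrative, not from the original source): logging
 * bytes 256 through 511 with XFS_BLF_SHIFT = 7 gives first_bit = 2 and
 * last_bit = 3, so bits_to_set = 2. The range starts at bit 2 of word 0,
 * so the partial-word mask is ((1 << 2) - 1) << 2 = 0xc, marking two
 * 128 byte chunks dirty.
 */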

/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
void
xfs_buf_item_log(
	struct xfs_buf_log_item	*bip,
	uint			first,
	uint			last)
{
	int			i;
	uint			start;
	uint			end;
	struct xfs_buf		*bp = bip->bli_buf;

	/*
	 * walk each buffer segment and mark them dirty appropriately.
	 */
	start = 0;
	for (i = 0; i < bip->bli_format_count; i++) {
		if (start > last)
			break;
		end = start + BBTOB(bp->b_maps[i].bm_len) - 1;

		/* skip to the map that includes the first byte to log */
		if (first > end) {
			start += BBTOB(bp->b_maps[i].bm_len);
			continue;
		}

		/*
		 * Trim the range to this segment and mark it in the bitmap.
		 * Note that we must convert buffer offsets to segment relative
		 * offsets (e.g., the first byte of each segment is byte 0 of
		 * that segment).
		 */
		if (first < start)
			first = start;
		if (end > last)
			end = last;
		xfs_buf_item_log_segment(first - start, end - start,
					 &bip->bli_formats[i].blf_data_map[0]);

		start += BBTOB(bp->b_maps[i].bm_len);
	}
}
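
/*
 * For example (illustrative): on a discontiguous buffer with two 4096
 * byte segments, logging bytes 4000 through 4200 dirties the tail of
 * segment 0 (segment-relative bytes 4000-4095) and the head of segment 1
 * (segment-relative bytes 0-104).
 */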

/*
 * Return true if the buffer has any ranges logged/dirtied by a transaction,
 * false otherwise.
 */
bool
xfs_buf_item_dirty_format(
	struct xfs_buf_log_item	*bip)
{
	int			i;

	for (i = 0; i < bip->bli_format_count; i++) {
		if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
				      bip->bli_formats[i].blf_map_size))
			return true;
	}

	return false;
}

STATIC void
xfs_buf_item_free(
	struct xfs_buf_log_item	*bip)
{
	xfs_buf_item_free_format(bip);
	kmem_free(bip->bli_item.li_lv_shadow);
	kmem_cache_free(xfs_buf_item_cache, bip);
}

/*
 * xfs_buf_item_relse() is called when the buf log item is no longer needed.
 */
void
xfs_buf_item_relse(
	struct xfs_buf	*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	trace_xfs_buf_item_relse(bp, _RET_IP_);
	ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags));

	bp->b_log_item = NULL;
	xfs_buf_rele(bp);
	xfs_buf_item_free(bip);
}

void
xfs_buf_item_done(
	struct xfs_buf		*bp)
{
	/*
	 * If we are forcibly shutting down, this may well be off the AIL
	 * already. That's because we simulate the log-committed callbacks to
	 * unpin these buffers. Or we may never have put this item on AIL
	 * because the transaction was aborted forcibly.
	 * xfs_trans_ail_delete() takes care of these.
	 *
	 * Either way, AIL is useless if we're forcing a shutdown.
	 *
	 * Note that log recovery writes might have buffer items that are not
	 * on the AIL even when the file system is not shut down.
	 */
	xfs_trans_ail_delete(&bp->b_log_item->bli_item,
			     (bp->b_flags & _XBF_LOGRECOVERY) ? 0 :
			     SHUTDOWN_CORRUPT_INCORE);
	xfs_buf_item_relse(bp);
}