// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_buf_item.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_trace.h"
#include "xfs_log.h"

kmem_zone_t	*xfs_buf_item_zone;

static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_buf_log_item, bli_item);
}

static void xfs_buf_item_done(struct xfs_buf *bp);

/* Is this log iovec plausibly large enough to contain the buffer log format? */
bool
xfs_buf_log_check_iovec(
	struct xfs_log_iovec	*iovec)
{
	struct xfs_buf_log_format *blfp = iovec->i_addr;
	char			*bmp_end;
	char			*item_end;

	if (offsetof(struct xfs_buf_log_format, blf_data_map) > iovec->i_len)
		return false;

	item_end = (char *)iovec->i_addr + iovec->i_len;
	bmp_end = (char *)&blfp->blf_data_map[blfp->blf_map_size];
	return bmp_end <= item_end;
}

static inline int
xfs_buf_log_format_size(
	struct xfs_buf_log_format *blfp)
{
	return offsetof(struct xfs_buf_log_format, blf_data_map) +
			(blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
}
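
/*
 * Example (illustrative, assuming 32-bit map words): a format with
 * blf_map_size == 2 is logged as the fixed header up to blf_data_map
 * plus 8 bytes of bitmap, not the full size of the in-memory structure.
 */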

/*
 * This returns the number of log iovecs needed to log the
 * given buf log item.
 *
 * It calculates this as 1 iovec for the buf log format structure
 * and 1 for each stretch of non-contiguous chunks to be logged.
 * Contiguous chunks are logged in a single iovec.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing.
 */
STATIC void
xfs_buf_item_size_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_buf_log_format *blfp,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_buf		*bp = bip->bli_buf;
	int			next_bit;
	int			last_bit;

	last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (last_bit == -1)
		return;

	/*
	 * initial count for a dirty buffer is 2 vectors - the format structure
	 * and the first dirty region.
	 */
	*nvecs += 2;
	*nbytes += xfs_buf_log_format_size(blfp) + XFS_BLF_CHUNK;

	while (last_bit != -1) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					last_bit + 1);
		/*
		 * If we run out of bits, leave the loop,
		 * else if we find a new set of bits bump the number of vecs,
		 * else keep scanning the current set of bits.
		 */
		if (next_bit == -1) {
			break;
		} else if (next_bit != last_bit + 1) {
			last_bit = next_bit;
			(*nvecs)++;
		} else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
			   (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
			    XFS_BLF_CHUNK)) {
			last_bit = next_bit;
			(*nvecs)++;
		} else {
			last_bit++;
		}
		*nbytes += XFS_BLF_CHUNK;
	}
}
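
/*
 * Worked example (illustrative, assuming XFS_BLF_CHUNK is 128 bytes):
 * a segment bitmap with bits 0, 1 and 4 set describes two dirty
 * regions, so this adds 3 iovecs (the format structure plus two data
 * regions) and the format size plus 3 * 128 bytes to the totals.
 */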

/*
 * This returns the number of log iovecs needed to log the given buf log item.
 *
 * It calculates this as 1 iovec for the buf log format structure and 1 for each
 * stretch of non-contiguous chunks to be logged.  Contiguous chunks are logged
 * in a single iovec.
 *
 * Discontiguous buffers need a format structure per region that is being
 * logged. This makes the changes in the buffer appear to log recovery as though
 * they came from separate buffers, just like would occur if multiple buffers
 * were used instead of a single discontiguous buffer. This enables
 * discontiguous buffers to be in-memory constructs, completely transparent to
 * what ends up on disk.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
 * format structures.
 */
STATIC void
xfs_buf_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_size_stale(bip);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		*nvecs += bip->bli_format_count;
		for (i = 0; i < bip->bli_format_count; i++) {
			*nbytes += xfs_buf_log_format_size(&bip->bli_formats[i]);
		}
		return;
	}

	ASSERT(bip->bli_flags & XFS_BLI_LOGGED);

	if (bip->bli_flags & XFS_BLI_ORDERED) {
		/*
		 * The buffer has been logged just to order it.
		 * It is not being included in the transaction
		 * commit, so no vectors are used at all.
		 */
		trace_xfs_buf_item_size_ordered(bip);
		*nvecs = XFS_LOG_VEC_ORDERED;
		return;
	}

	/*
	 * The vector count is based on the number of buffer vectors we have
	 * dirty bits in. This will only be greater than one when we have a
	 * compound buffer with more than one segment dirty. Hence for compound
	 * buffers we need to track which segment the dirty bits correspond to,
	 * and when we move from one segment to the next increment the vector
	 * count for the extra buf log format structure that will need to be
	 * written.
	 */
	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_size_segment(bip, &bip->bli_formats[i],
					  nvecs, nbytes);
	}
	trace_xfs_buf_item_size(bip);
}

static inline void
xfs_buf_item_copy_iovec(
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	struct xfs_buf		*bp,
	uint			offset,
	int			first_bit,
	uint			nbits)
{
	offset += first_bit * XFS_BLF_CHUNK;
	xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BCHUNK,
			xfs_buf_offset(bp, offset),
			nbits * XFS_BLF_CHUNK);
}
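
/*
 * For example (illustrative, assuming 128 byte chunks): first_bit == 2
 * and nbits == 3 copies the 384 bytes starting 256 bytes into the
 * segment into a single data iovec.
 */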

static inline bool
xfs_buf_item_straddle(
	struct xfs_buf		*bp,
	uint			offset,
	int			next_bit,
	int			last_bit)
{
	return xfs_buf_offset(bp, offset + (next_bit << XFS_BLF_SHIFT)) !=
		(xfs_buf_offset(bp, offset + (last_bit << XFS_BLF_SHIFT)) +
		 XFS_BLF_CHUNK);
}
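
/*
 * For example (illustrative): in a multi-page buffer backed by
 * discontiguous pages, the first chunk of the second page is
 * bitmap-adjacent to the last chunk of the first page but not
 * address-adjacent, so the two chunks must go in separate iovecs.
 */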

static void
xfs_buf_item_format_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	uint			offset,
	struct xfs_buf_log_format *blfp)
{
	struct xfs_buf		*bp = bip->bli_buf;
	uint			base_size;
	int			first_bit;
	int			last_bit;
	int			next_bit;
	uint			nbits;

	/* copy the flags across from the base format item */
	blfp->blf_flags = bip->__bli_format.blf_flags;

	/*
	 * Base size is the actual size of the ondisk structure - it reflects
	 * the actual size of the dirty bitmap rather than the size of the in
	 * memory structure.
	 */
	base_size = xfs_buf_log_format_size(blfp);

	first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
		/*
		 * If the map is not dirty in the transaction, mark
		 * the size as zero and do not advance the vector pointer.
		 */
		return;
	}

	blfp = xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BFORMAT, blfp, base_size);
	blfp->blf_size = 1;

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_format_stale(bip);
		ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
		return;
	}

	/*
	 * Fill in an iovec for each set of contiguous chunks.
	 */
	last_bit = first_bit;
	nbits = 1;
	for (;;) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					(uint)last_bit + 1);
		/*
		 * If we run out of bits fill in the last iovec and get out of
		 * the loop.  Else if we start a new set of bits then fill in
		 * the iovec for the series we were looking at and start
		 * counting the bits in the new one.  Else we're still in the
		 * same set of bits so just keep counting and scanning.
		 */
		if (next_bit == -1) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			break;
		} else if (next_bit != last_bit + 1 ||
			   xfs_buf_item_straddle(bp, offset, next_bit, last_bit)) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else {
			last_bit++;
			nbits++;
		}
	}
}
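
/*
 * For example (illustrative): a segment with two discontiguous dirty
 * regions ends up with blf_size == 3, one iovec for the format
 * structure and one per region.
 */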

/*
 * This is called to fill in the vector of log iovecs for the
 * given log buf item.  It fills the first entry with a buf log
 * format structure, and the rest point to contiguous chunks
 * within the buffer.
 */
STATIC void
xfs_buf_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	struct xfs_log_iovec	*vecp = NULL;
	uint			offset = 0;
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));
	ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
	       (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF
	        && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
	ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	/*
	 * If it is an inode buffer, transfer the in-memory state to the
	 * format flags and clear the in-memory state.
	 *
	 * For buffer based inode allocation, we do not transfer
	 * this state if the inode buffer allocation has not yet been committed
	 * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
	 * correct replay of the inode allocation.
	 *
	 * For icreate item based inode allocation, the buffers aren't written
	 * to the journal during allocation, and hence we should always tag the
	 * buffer as an inode buffer so that the correct unlinked list replay
	 * occurs during recovery.
	 */
	if (bip->bli_flags & XFS_BLI_INODE_BUF) {
		if (xfs_sb_version_has_v3inode(&lip->li_mountp->m_sb) ||
		    !((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
		      xfs_log_item_in_current_chkpt(lip)))
			bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
		bip->bli_flags &= ~XFS_BLI_INODE_BUF;
	}

	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_format_segment(bip, lv, &vecp, offset,
					    &bip->bli_formats[i]);
		offset += BBTOB(bp->b_maps[i].bm_len);
	}

	/*
	 * Check to make sure everything is consistent.
	 */
	trace_xfs_buf_item_format(bip);
}

/*
 * This is called to pin the buffer associated with the buf log item in memory
 * so it cannot be written out.
 *
 * We also always take a reference to the buffer log item here so that the bli
 * is held while the item is pinned in memory. This means that we can
 * unconditionally drop the reference count a transaction holds when the
 * transaction is completed.
 */
STATIC void
xfs_buf_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_pin(bip);

	atomic_inc(&bip->bli_refcount);
	atomic_inc(&bip->bli_buf->b_pin_count);
}

/*
 * This is called to unpin the buffer associated with the buf log
 * item which was previously pinned with a call to xfs_buf_item_pin().
 *
 * Also drop the reference to the buf item for the current transaction.
 * If the XFS_BLI_STALE flag is set and we are the last reference,
 * then free up the buf log item and unlock the buffer.
 *
 * If the remove flag is set we are called from uncommit in the
 * forced-shutdown path.  If that is true and the reference count on
 * the log item is going to drop to zero we need to free the item's
 * descriptor in the transaction.
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	int			stale = bip->bli_flags & XFS_BLI_STALE;
	int			freed;

	ASSERT(bp->b_log_item == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	freed = atomic_dec_and_test(&bip->bli_refcount);

	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	if (freed && stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(bp->b_flags & XBF_STALE);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);

		trace_xfs_buf_item_unpin_stale(bip);

		if (remove) {
			/*
			 * If we are in a transaction context, we have to
			 * remove the log item from the transaction as we are
			 * about to release our reference to the buffer.  If we
			 * don't, the unlock that occurs later in
			 * xfs_trans_uncommit() will try to reference the
			 * buffer which we no longer have a hold on.
			 */
			if (!list_empty(&lip->li_trans))
				xfs_trans_del_item(lip);

			/*
			 * Since the transaction no longer refers to the buffer,
			 * the buffer should no longer refer to the transaction.
			 */
			bp->b_transp = NULL;
		}

		/*
		 * If we get called here because of an IO error, we may or may
		 * not have the item on the AIL. xfs_trans_ail_delete() will
		 * take care of that situation. xfs_trans_ail_delete() drops
		 * the AIL lock.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_item_done(bp);
			xfs_iflush_done(bp);
			ASSERT(list_empty(&bp->b_li_list));
		} else {
			xfs_trans_ail_delete(lip, SHUTDOWN_LOG_IO_ERROR);
			xfs_buf_item_relse(bp);
			ASSERT(bp->b_log_item == NULL);
		}
		xfs_buf_relse(bp);
	} else if (freed && remove) {
		/*
		 * The buffer must be locked and held by the caller to simulate
		 * an async I/O failure.
		 */
		xfs_buf_lock(bp);
		xfs_buf_hold(bp);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_ioend_fail(bp);
	}
}

STATIC uint
xfs_buf_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			rval = XFS_ITEM_SUCCESS;

	if (xfs_buf_ispinned(bp))
		return XFS_ITEM_PINNED;
	if (!xfs_buf_trylock(bp)) {
		/*
		 * If we have just raced with a buffer being pinned and it has
		 * been marked stale, we could end up stalling until someone
		 * else issues a log force to unpin the stale buffer. Check for
		 * the race condition here so xfsaild recognizes the buffer is
		 * pinned and queues a log force to move it along.
		 */
		if (xfs_buf_ispinned(bp))
			return XFS_ITEM_PINNED;
		return XFS_ITEM_LOCKED;
	}

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_push(bip);

	/* has a previous flush failed due to IO errors? */
	if (bp->b_flags & XBF_WRITE_FAIL) {
		xfs_buf_alert_ratelimited(bp, "XFS: Failing async write",
	    "Failing async write on buffer block 0x%llx. Retrying async write.",
					  (long long)bp->b_bn);
	}

	if (!xfs_buf_delwri_queue(bp, buffer_list))
		rval = XFS_ITEM_FLUSHING;
	xfs_buf_unlock(bp);
	return rval;
}

/*
 * Drop the buffer log item refcount and take appropriate action. This helper
 * determines whether the bli must be freed or not, since a decrement to zero
 * does not necessarily mean the bli is unused.
 *
 * Return true if the bli is freed, false otherwise.
 */
bool
xfs_buf_item_put(
	struct xfs_buf_log_item	*bip)
{
	struct xfs_log_item	*lip = &bip->bli_item;
	bool			aborted;
	bool			dirty;

	/* drop the bli ref and return if it wasn't the last one */
	if (!atomic_dec_and_test(&bip->bli_refcount))
		return false;

	/*
	 * We dropped the last ref and must free the item if clean or aborted.
	 * If the bli is dirty and non-aborted, the buffer was clean in the
	 * transaction but still awaiting writeback from previous changes. In
	 * that case, the bli is freed on buffer writeback completion.
	 */
	aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
		  XFS_FORCED_SHUTDOWN(lip->li_mountp);
	dirty = bip->bli_flags & XFS_BLI_DIRTY;
	if (dirty && !aborted)
		return false;

	/*
	 * The bli is aborted or clean. An aborted item may be in the AIL
	 * regardless of dirty state.  For example, consider an aborted
	 * transaction that invalidated a dirty bli and cleared the dirty
	 * state.
	 */
	if (aborted)
		xfs_trans_ail_delete(lip, 0);
	xfs_buf_item_relse(bip->bli_buf);
	return true;
}

/*
 * Release the buffer associated with the buf log item.  If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.
 *
 * This call ignores the recursion count.  It is only called when the buffer
 * should REALLY be unlocked, regardless of the recursion count.
 *
 * We unconditionally drop the transaction's reference to the log item. If the
 * item was logged, then another reference was taken when it was pinned, so we
 * can safely drop the transaction reference now.  This also allows us to avoid
 * potential races with the unpin code freeing the bli by not referencing the
 * bli after we've dropped the reference count.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 * if necessary but do not unlock the buffer.  This is for support of
 * xfs_trans_bhold(). Make sure the XFS_BLI_HOLD field is cleared if we don't
 * free the item.
 */
STATIC void
xfs_buf_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	bool			released;
	bool			hold = bip->bli_flags & XFS_BLI_HOLD;
	bool			stale = bip->bli_flags & XFS_BLI_STALE;
#if defined(DEBUG) || defined(XFS_WARN)
	bool			ordered = bip->bli_flags & XFS_BLI_ORDERED;
	bool			dirty = bip->bli_flags & XFS_BLI_DIRTY;
	bool			aborted = test_bit(XFS_LI_ABORTED,
						   &lip->li_flags);
#endif

	trace_xfs_buf_item_release(bip);

	/*
	 * The bli dirty state should match whether the blf has logged segments
	 * except for ordered buffers, where only the bli should be dirty.
	 */
	ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) ||
	       (ordered && dirty && !xfs_buf_item_dirty_format(bip)));
	ASSERT(!stale || (bip->__bli_format.blf_flags & XFS_BLF_CANCEL));

	/*
	 * Clear the buffer's association with this transaction and
	 * per-transaction state from the bli, which has been copied above.
	 */
	bp->b_transp = NULL;
	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);

	/*
	 * Unref the item and unlock the buffer unless held or stale. Stale
	 * buffers remain locked until final unpin unless the bli is freed by
	 * the unref call. The latter implies shutdown because buffer
	 * invalidation dirties the bli and transaction.
	 */
	released = xfs_buf_item_put(bip);
	if (hold || (stale && !released))
		return;
	ASSERT(!stale || aborted);
	xfs_buf_relse(bp);
}

STATIC void
xfs_buf_item_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		commit_lsn)
{
	return xfs_buf_item_release(lip);
}

/*
 * This is called to find out where the oldest active copy of the
 * buf log item in the on disk log resides now that the last log
 * write of it completed at the given lsn.
 * We always re-log all the dirty data in a buffer, so usually the
 * latest copy in the on disk log is the only one that matters.  For
 * those cases we simply return the given lsn.
 *
 * The one exception to this is for buffers full of newly allocated
 * inodes.  These buffers are only relogged with the XFS_BLI_INODE_BUF
 * flag set, indicating that only the di_next_unlinked fields from the
 * inodes in the buffers will be replayed during recovery.  If the
 * original newly allocated inode images have not yet been flushed
 * when the buffer is so relogged, then we need to make sure that we
 * keep the old images in the 'active' portion of the log.  We do this
 * by returning the original lsn of that transaction here rather than
 * the current one.
 */
STATIC xfs_lsn_t
xfs_buf_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	trace_xfs_buf_item_committed(bip);

	if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
		return lip->li_lsn;
	return lsn;
}
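
/*
 * For example (illustrative): an inode allocation buffer first logged
 * at lsn 100 and relogged at lsn 200 still reports 100 here, so the
 * original inode images stay in the active portion of the log until
 * they have been flushed.
 */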

static const struct xfs_item_ops xfs_buf_item_ops = {
	.iop_size	= xfs_buf_item_size,
	.iop_format	= xfs_buf_item_format,
	.iop_pin	= xfs_buf_item_pin,
	.iop_unpin	= xfs_buf_item_unpin,
	.iop_release	= xfs_buf_item_release,
	.iop_committing	= xfs_buf_item_committing,
	.iop_committed	= xfs_buf_item_committed,
	.iop_push	= xfs_buf_item_push,
};

STATIC void
xfs_buf_item_get_format(
	struct xfs_buf_log_item	*bip,
	int			count)
{
	ASSERT(bip->bli_formats == NULL);
	bip->bli_format_count = count;

	if (count == 1) {
		bip->bli_formats = &bip->__bli_format;
		return;
	}

	bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
				0);
}

STATIC void
xfs_buf_item_free_format(
	struct xfs_buf_log_item	*bip)
{
	if (bip->bli_formats != &bip->__bli_format) {
		kmem_free(bip->bli_formats);
		bip->bli_formats = NULL;
	}
}

/*
 * Allocate a new buf log item to go with the given buffer.
 * Set the buffer's b_log_item field to point to the new
 * buf log item.
 */
int
xfs_buf_item_init(
	struct xfs_buf		*bp,
	struct xfs_mount	*mp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	int			chunks;
	int			map_size;
	int			i;

	/*
	 * Check to see if there is already a buf log item for
	 * this buffer. If we do already have one, there is
	 * nothing to do here so return.
	 */
	ASSERT(bp->b_mount == mp);
	if (bip) {
		ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
		ASSERT(!bp->b_transp);
		ASSERT(bip->bli_buf == bp);
		return 0;
	}

	bip = kmem_zone_zalloc(xfs_buf_item_zone, 0);
	xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
	bip->bli_buf = bp;

	/*
	 * chunks is the number of XFS_BLF_CHUNK size pieces the buffer
	 * can be divided into. Make sure not to truncate any pieces.
	 * map_size is the size of the bitmap needed to describe the
	 * chunks of the buffer.
	 *
	 * Discontiguous buffer support follows the layout of the underlying
	 * buffer. This makes the implementation as simple as possible.
	 */
	xfs_buf_item_get_format(bip, bp->b_map_count);

	for (i = 0; i < bip->bli_format_count; i++) {
		chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
				      XFS_BLF_CHUNK);
		map_size = DIV_ROUND_UP(chunks, NBWORD);

		if (map_size > XFS_BLF_DATAMAP_SIZE) {
			kmem_cache_free(xfs_buf_item_zone, bip);
			xfs_err(mp,
	"buffer item dirty bitmap (%u uints) too small to reflect %u bytes!",
					map_size,
					BBTOB(bp->b_maps[i].bm_len));
			return -EFSCORRUPTED;
		}

		bip->bli_formats[i].blf_type = XFS_LI_BUF;
		bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
		bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
		bip->bli_formats[i].blf_map_size = map_size;
	}

	bp->b_log_item = bip;
	xfs_buf_hold(bp);
	return 0;
}
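
/*
 * Sizing example (illustrative, assuming XFS_BLF_CHUNK == 128 and
 * NBWORD == 32): a 4096 byte segment divides into 32 chunks, which fit
 * exactly in one 32-bit bitmap word, so map_size is 1.
 */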

/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
static void
xfs_buf_item_log_segment(
	uint			first,
	uint			last,
	uint			*map)
{
	uint			first_bit;
	uint			last_bit;
	uint			bits_to_set;
	uint			bits_set;
	uint			word_num;
	uint			*wordp;
	uint			bit;
	uint			end_bit;
	uint			mask;

	ASSERT(first < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);
	ASSERT(last < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);

	/*
	 * Convert byte offsets to bit numbers.
	 */
	first_bit = first >> XFS_BLF_SHIFT;
	last_bit = last >> XFS_BLF_SHIFT;

	/*
	 * Calculate the total number of bits to be set.
	 */
	bits_to_set = last_bit - first_bit + 1;

	/*
	 * Get a pointer to the first word in the bitmap
	 * to set a bit in.
	 */
	word_num = first_bit >> BIT_TO_WORD_SHIFT;
	wordp = &map[word_num];

	/*
	 * Calculate the starting bit in the first word.
	 */
	bit = first_bit & (uint)(NBWORD - 1);

	/*
	 * First set any bits in the first word of our range.
	 * If it starts at bit 0 of the word, it will be
	 * set below rather than here.  That is what the variable
	 * bit tells us. The variable bits_set tracks the number
	 * of bits that have been set so far.  End_bit is the number
	 * of the last bit to be set in this word plus one.
	 */
	if (bit) {
		end_bit = min(bit + bits_to_set, (uint)NBWORD);
		mask = ((1U << (end_bit - bit)) - 1) << bit;
		*wordp |= mask;
		wordp++;
		bits_set = end_bit - bit;
	} else {
		bits_set = 0;
	}

	/*
	 * Now set bits a whole word at a time that are between
	 * first_bit and last_bit.
	 */
	while ((bits_to_set - bits_set) >= NBWORD) {
		*wordp = 0xffffffff;
		bits_set += NBWORD;
		wordp++;
	}

	/*
	 * Finally, set any bits left to be set in one last partial word.
	 */
	end_bit = bits_to_set - bits_set;
	if (end_bit) {
		mask = (1U << end_bit) - 1;
		*wordp |= mask;
	}
}
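
/*
 * Worked example (illustrative, assuming XFS_BLF_SHIFT == 7 and
 * NBWORD == 32): logging bytes 100 through 999 covers bits 0 through 7,
 * so bit == 0, the first-word branch and the whole-word loop do
 * nothing, and the final partial-word step sets mask 0xff in the first
 * bitmap word.
 */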

/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
void
xfs_buf_item_log(
	struct xfs_buf_log_item	*bip,
	uint			first,
	uint			last)
{
	int			i;
	uint			start;
	uint			end;
	struct xfs_buf		*bp = bip->bli_buf;

	/*
	 * walk each buffer segment and mark them dirty appropriately.
	 */
	start = 0;
	for (i = 0; i < bip->bli_format_count; i++) {
		if (start > last)
			break;
		end = start + BBTOB(bp->b_maps[i].bm_len) - 1;

		/* skip to the map that includes the first byte to log */
		if (first > end) {
			start += BBTOB(bp->b_maps[i].bm_len);
			continue;
		}

		/*
		 * Trim the range to this segment and mark it in the bitmap.
		 * Note that we must convert buffer offsets to segment relative
		 * offsets (e.g., the first byte of each segment is byte 0 of
		 * that segment).
		 */
		if (first < start)
			first = start;
		if (end > last)
			end = last;
		xfs_buf_item_log_segment(first - start, end - start,
					 &bip->bli_formats[i].blf_data_map[0]);

		start += BBTOB(bp->b_maps[i].bm_len);
	}
}
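
/*
 * Example (illustrative): for a discontiguous buffer built from two
 * 4096 byte segments, xfs_buf_item_log(bip, 4000, 4200) marks bytes
 * 4000-4095 in segment 0 and bytes 0-104 in segment 1, each relative
 * to its own segment bitmap.
 */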

/*
 * Return true if the buffer has any ranges logged/dirtied by a transaction,
 * false otherwise.
 */
bool
xfs_buf_item_dirty_format(
	struct xfs_buf_log_item	*bip)
{
	int			i;

	for (i = 0; i < bip->bli_format_count; i++) {
		if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
				      bip->bli_formats[i].blf_map_size))
			return true;
	}

	return false;
}

STATIC void
xfs_buf_item_free(
	struct xfs_buf_log_item	*bip)
{
	xfs_buf_item_free_format(bip);
	kmem_free(bip->bli_item.li_lv_shadow);
	kmem_cache_free(xfs_buf_item_zone, bip);
}

/*
 * xfs_buf_item_relse() is called when the buf log item is no longer needed.
 */
void
xfs_buf_item_relse(
	struct xfs_buf	*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	trace_xfs_buf_item_relse(bp, _RET_IP_);
	ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags));

	bp->b_log_item = NULL;
	xfs_buf_rele(bp);
	xfs_buf_item_free(bip);
}

/*
 * Decide if we're going to retry the write after a failure, and prepare
 * the buffer for retrying the write.
 */
static bool
xfs_buf_ioerror_fail_without_retry(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	static ulong		lasttime;
	static struct xfs_buftarg *lasttarg;

	/*
	 * If we've already decided to shutdown the filesystem because of
	 * I/O errors, there's no point in giving this a retry.
	 */
	if (XFS_FORCED_SHUTDOWN(mp))
		return true;

	if (bp->b_target != lasttarg ||
	    time_after(jiffies, (lasttime + 5*HZ))) {
		lasttime = jiffies;
		xfs_buf_ioerror_alert(bp, __this_address);
	}
	lasttarg = bp->b_target;

	/* synchronous writes will have callers process the error */
	if (!(bp->b_flags & XBF_ASYNC))
		return true;
	return false;
}

static bool
xfs_buf_ioerror_retry(
	struct xfs_buf		*bp,
	struct xfs_error_cfg	*cfg)
{
	if ((bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL)) &&
	    bp->b_last_error == bp->b_error)
		return false;

	bp->b_flags |= (XBF_WRITE | XBF_DONE | XBF_WRITE_FAIL);
	bp->b_last_error = bp->b_error;
	if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
	    !bp->b_first_retry_time)
		bp->b_first_retry_time = jiffies;
	return true;
}

/*
 * Account for this latest trip around the retry handler, and decide if
 * we've failed enough times to constitute a permanent failure.
 */
static bool
xfs_buf_ioerror_permanent(
	struct xfs_buf		*bp,
	struct xfs_error_cfg	*cfg)
{
	struct xfs_mount	*mp = bp->b_mount;

	if (cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
	    ++bp->b_retries > cfg->max_retries)
		return true;
	if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
	    time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time))
		return true;

	/* At unmount we may treat errors differently */
	if ((mp->m_flags & XFS_MOUNT_UNMOUNTING) && mp->m_fail_unmount)
		return true;

	return false;
}
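
/*
 * Example (illustrative): with cfg->max_retries == 2, the third trip
 * through this handler bumps b_retries to 3, exceeding the limit, and
 * the failure is reported as permanent.
 */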

/*
 * On a sync write or shutdown we just want to stale the buffer and let the
 * caller handle the error in bp->b_error appropriately.
 *
 * If the write was asynchronous then no one will be looking for the error. If
 * this is the first failure of this type, clear the error state and write the
 * buffer out again. This means we always retry an async write failure at least
 * once, but we also need to set the buffer up to behave correctly now for
 * repeated failures.
 *
 * If we get repeated async write failures, then we take action according to
 * the error configuration we have been set up to use.
 *
 * Multi-state return value:
 *
 * XBF_IOERROR_FINISH: clear IO error retry state and run callback completions
 * XBF_IOERROR_DONE: resubmitted immediately, do not run any completions
 * XBF_IOERROR_FAIL: transient error, run failure callback completions and then
 *    release the buffer
 */
enum {
	XBF_IOERROR_FINISH,
	XBF_IOERROR_DONE,
	XBF_IOERROR_FAIL,
};

static int
xfs_buf_iodone_error(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_error_cfg	*cfg;

	if (xfs_buf_ioerror_fail_without_retry(bp))
		goto out_stale;

	trace_xfs_buf_item_iodone_async(bp, _RET_IP_);

	cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);
	if (xfs_buf_ioerror_retry(bp, cfg)) {
		xfs_buf_ioerror(bp, 0);
		xfs_buf_submit(bp);
		return XBF_IOERROR_DONE;
	}

	/*
	 * Permanent error - we need to trigger a shutdown if we haven't
	 * already to indicate that inconsistency will result from this action.
	 */
	if (xfs_buf_ioerror_permanent(bp, cfg)) {
		xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
		goto out_stale;
	}

	/* Still considered a transient error. Caller will schedule retries. */
	return XBF_IOERROR_FAIL;

out_stale:
	xfs_buf_stale(bp);
	bp->b_flags |= XBF_DONE;
	trace_xfs_buf_error_relse(bp, _RET_IP_);
	return XBF_IOERROR_FINISH;
}

static void
xfs_buf_item_done(
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	if (!bip)
		return;

	/*
	 * If we are forcibly shutting down, this may well be off the AIL
	 * already. That's because we simulate the log-committed callbacks to
	 * unpin these buffers. Or we may never have put this item on AIL
	 * because the transaction was aborted forcibly.
	 * xfs_trans_ail_delete() takes care of these.
	 *
	 * Either way, AIL is useless if we're forcing a shutdown.
	 */
	xfs_trans_ail_delete(&bip->bli_item, SHUTDOWN_CORRUPT_INCORE);
	bp->b_log_item = NULL;
	xfs_buf_item_free(bip);
	xfs_buf_rele(bp);
}

static inline void
xfs_buf_clear_ioerror_retry_state(
	struct xfs_buf		*bp)
{
	bp->b_last_error = 0;
	bp->b_retries = 0;
	bp->b_first_retry_time = 0;
}

/*
 * Inode buffer iodone callback function.
 */
void
xfs_buf_inode_iodone(
	struct xfs_buf		*bp)
{
	if (bp->b_error) {
		struct xfs_log_item *lip;
		int ret = xfs_buf_iodone_error(bp);

		if (ret == XBF_IOERROR_FINISH)
			goto finish_iodone;
		if (ret == XBF_IOERROR_DONE)
			return;
		ASSERT(ret == XBF_IOERROR_FAIL);
		list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
			set_bit(XFS_LI_FAILED, &lip->li_flags);
		}
		xfs_buf_ioerror(bp, 0);
		xfs_buf_relse(bp);
		return;
	}

finish_iodone:
	xfs_buf_clear_ioerror_retry_state(bp);
	xfs_buf_item_done(bp);
	xfs_iflush_done(bp);
	xfs_buf_ioend_finish(bp);
}

/*
 * Dquot buffer iodone callback function.
 */
void
xfs_buf_dquot_iodone(
	struct xfs_buf		*bp)
{
	if (bp->b_error) {
		struct xfs_log_item *lip;
		int ret = xfs_buf_iodone_error(bp);

		if (ret == XBF_IOERROR_FINISH)
			goto finish_iodone;
		if (ret == XBF_IOERROR_DONE)
			return;
		ASSERT(ret == XBF_IOERROR_FAIL);
		spin_lock(&bp->b_mount->m_ail->ail_lock);
		list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
			xfs_set_li_failed(lip, bp);
		}
		spin_unlock(&bp->b_mount->m_ail->ail_lock);
		xfs_buf_ioerror(bp, 0);
		xfs_buf_relse(bp);
		return;
	}

finish_iodone:
	xfs_buf_clear_ioerror_retry_state(bp);
	/* a newly allocated dquot buffer might have a log item attached */
	xfs_buf_item_done(bp);
	xfs_dquot_done(bp);
	xfs_buf_ioend_finish(bp);
}

/*
 * Dirty buffer iodone callback function.
 *
 * Note that for things like remote attribute buffers, there may not be a
 * buffer log item here, so processing the buffer log item must remain
 * optional.
 */
void
xfs_buf_iodone(
	struct xfs_buf		*bp)
{
	if (bp->b_error) {
		int ret = xfs_buf_iodone_error(bp);

		if (ret == XBF_IOERROR_FINISH)
			goto finish_iodone;
		if (ret == XBF_IOERROR_DONE)
			return;
		ASSERT(ret == XBF_IOERROR_FAIL);
		ASSERT(list_empty(&bp->b_li_list));
		xfs_buf_ioerror(bp, 0);
		xfs_buf_relse(bp);
		return;
	}

finish_iodone:
	xfs_buf_clear_ioerror_retry_state(bp);
	xfs_buf_item_done(bp);
	xfs_buf_ioend_finish(bp);
}