// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_trans_priv.h"
#include "xfs_trace.h"
#include "xfs_log.h"

kmem_zone_t	*xfs_buf_item_zone;
static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_buf_log_item, bli_item);
}

static void xfs_buf_item_done(struct xfs_buf *bp);

/* Is this log iovec plausibly large enough to contain the buffer log format? */
bool
xfs_buf_log_check_iovec(
	struct xfs_log_iovec		*iovec)
{
	struct xfs_buf_log_format	*blfp = iovec->i_addr;
	char				*bmp_end;
	char				*item_end;

	if (offsetof(struct xfs_buf_log_format, blf_data_map) > iovec->i_len)
		return false;

	item_end = (char *)iovec->i_addr + iovec->i_len;
	bmp_end = (char *)&blfp->blf_data_map[blfp->blf_map_size];
	return bmp_end <= item_end;
}

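/*
 * Worked example (values assumed for illustration): with i_len == 32 and a
 * format header that places blf_data_map at byte 24, a corrupt blf_map_size
 * of 16 four-byte words would put bmp_end at byte 88, well past item_end at
 * byte 32, so the check above rejects the record instead of letting log
 * recovery read past the end of the iovec.
 */
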
static inline int
xfs_buf_log_format_size(
	struct xfs_buf_log_format *blfp)
{
	return offsetof(struct xfs_buf_log_format, blf_data_map) +
			(blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
}

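/*
 * Worked example (layout values assumed): if blf_data_map starts at byte 24
 * of the structure and each bitmap word is 4 bytes, a blf_map_size of 2
 * yields 24 + 2 * 4 = 32 bytes. This is the on-disk size of the format
 * structure, not sizeof(struct xfs_buf_log_format), which reserves space
 * for the maximum possible bitmap.
 */
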
/*
 * This returns the number of log iovecs needed to log the
 * given buf log item segment.
 *
 * It calculates this as 1 iovec for the buf log format structure
 * and 1 for each stretch of non-contiguous chunks to be logged.
 * Contiguous chunks are logged in a single iovec.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing.
 */
STATIC void
xfs_buf_item_size_segment(
	struct xfs_buf_log_item		*bip,
	struct xfs_buf_log_format	*blfp,
	int				*nvecs,
	int				*nbytes)
{
	struct xfs_buf			*bp = bip->bli_buf;
	int				next_bit;
	int				last_bit;

	last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (last_bit == -1)
		return;

	/*
	 * Initial count for a dirty buffer is 2 vectors - the format structure
	 * and the first dirty region.
	 */
	*nvecs += 2;
	*nbytes += xfs_buf_log_format_size(blfp) + XFS_BLF_CHUNK;

	while (last_bit != -1) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					last_bit + 1);
		/*
		 * If we run out of bits, leave the loop,
		 * else if we find a new set of bits bump the number of vecs,
		 * else keep scanning the current set of bits.
		 */
		if (next_bit == -1) {
			break;
		} else if (next_bit != last_bit + 1) {
			last_bit = next_bit;
			(*nvecs)++;
		} else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
			   (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
			    XFS_BLF_CHUNK)) {
			last_bit = next_bit;
			(*nvecs)++;
		} else {
			last_bit++;
		}
		*nbytes += XFS_BLF_CHUNK;
	}
}

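/*
 * Worked example (single segment, bitmap assumed): a dirty map of 0b0110011
 * has set bits {0, 1, 4, 5}, i.e. two contiguous runs. The code above yields
 * *nvecs += 3 (the format structure plus one vector per run) and *nbytes
 * grows by the format size plus four dirty chunks.
 */
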
/*
 * This returns the number of log iovecs needed to log the given buf log item.
 *
 * It calculates this as 1 iovec for the buf log format structure and 1 for each
 * stretch of non-contiguous chunks to be logged.  Contiguous chunks are logged
 * in a single iovec.
 *
 * Discontiguous buffers need a format structure per region that is being
 * logged. This makes the changes in the buffer appear to log recovery as though
 * they came from separate buffers, just like they would if multiple buffers
 * were used instead of a single discontiguous buffer. This enables
 * discontiguous buffers to be in-memory constructs, completely transparent to
 * what ends up on disk.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
 * format structure.
 */
STATIC void
xfs_buf_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_size_stale(bip);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		*nvecs += bip->bli_format_count;
		for (i = 0; i < bip->bli_format_count; i++) {
			*nbytes += xfs_buf_log_format_size(&bip->bli_formats[i]);
		}
		return;
	}

	ASSERT(bip->bli_flags & XFS_BLI_LOGGED);

	if (bip->bli_flags & XFS_BLI_ORDERED) {
		/*
		 * The buffer has been logged just to order it.
		 * It is not being included in the transaction
		 * commit, so no vectors are used at all.
		 */
		trace_xfs_buf_item_size_ordered(bip);
		*nvecs = XFS_LOG_VEC_ORDERED;
		return;
	}

	/*
	 * The vector count is based on the number of buffer vectors we have
	 * dirty bits in. This will only be greater than one when we have a
	 * compound buffer with more than one segment dirty. Hence for compound
	 * buffers we need to track which segment the dirty bits correspond to,
	 * and when we move from one segment to the next increment the vector
	 * count for the extra buf log format structure that will need to be
	 * written.
	 */
	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_size_segment(bip, &bip->bli_formats[i],
					  nvecs, nbytes);
	}
	trace_xfs_buf_item_size(bip);
}

static inline void
xfs_buf_item_copy_iovec(
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	struct xfs_buf		*bp,
	uint			offset,
	int			first_bit,
	uint			nbits)
{
	offset += first_bit * XFS_BLF_CHUNK;
	xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BCHUNK,
			xfs_buf_offset(bp, offset),
			nbits * XFS_BLF_CHUNK);
}

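/*
 * Example (assuming the usual 128-byte XFS_BLF_CHUNK): for first_bit = 4 and
 * nbits = 3, the copy starts 512 bytes past the segment offset and logs 384
 * bytes - three contiguous dirty chunks in a single log iovec.
 */
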
static inline bool
xfs_buf_item_straddle(
	struct xfs_buf		*bp,
	uint			offset,
	int			next_bit,
	int			last_bit)
{
	return xfs_buf_offset(bp, offset + (next_bit << XFS_BLF_SHIFT)) !=
		(xfs_buf_offset(bp, offset + (last_bit << XFS_BLF_SHIFT)) +
		 XFS_BLF_CHUNK);
}

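/*
 * Illustration: chunks that are adjacent in the buffer's address space may
 * still live in different pages of a discontiguous buffer, in which case
 * their kernel addresses are not adjacent either. Such bits must not be
 * merged into one iovec, because the iovec copy is a single contiguous
 * memcpy from the address xfs_buf_offset() returns.
 */
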
static void
xfs_buf_item_format_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	uint			offset,
	struct xfs_buf_log_format *blfp)
{
	struct xfs_buf		*bp = bip->bli_buf;
	uint			base_size;
	int			first_bit;
	int			last_bit;
	int			next_bit;
	uint			nbits;

	/* Copy the flags across from the base format item. */
	blfp->blf_flags = bip->__bli_format.blf_flags;

	/*
	 * Base size is the actual size of the ondisk structure - it reflects
	 * the actual size of the dirty bitmap rather than the size of the in
	 * memory structure.
	 */
	base_size = xfs_buf_log_format_size(blfp);

	first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
		/*
		 * If the map is not dirty in the transaction, mark
		 * the size as zero and do not advance the vector pointer.
		 */
		return;
	}

	blfp = xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BFORMAT, blfp, base_size);
	blfp->blf_size = 1;

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_format_stale(bip);
		ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
		return;
	}

	/*
	 * Fill in an iovec for each set of contiguous chunks.
	 */
	last_bit = first_bit;
	nbits = 1;
	for (;;) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					(uint)last_bit + 1);
		/*
		 * If we run out of bits fill in the last iovec and get out of
		 * the loop.  Else if we start a new set of bits then fill in
		 * the iovec for the series we were looking at and start
		 * counting the bits in the new one.  Else we're still in the
		 * same set of bits so just keep counting and scanning.
		 */
		if (next_bit == -1) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			break;
		} else if (next_bit != last_bit + 1 ||
			   xfs_buf_item_straddle(bp, offset, next_bit, last_bit)) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else {
			last_bit++;
			nbits++;
		}
	}
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given log buf item.  It fills the first entry with a buf log
 * format structure, and the rest point to contiguous chunks
 * within the buffer.
 */
STATIC void
xfs_buf_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	struct xfs_log_iovec	*vecp = NULL;
	uint			offset = 0;
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));
	ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
	       (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF
	        && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
	ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	/*
	 * If it is an inode buffer, transfer the in-memory state to the
	 * format flags and clear the in-memory state.
	 *
	 * For buffer based inode allocation, we do not transfer
	 * this state if the inode buffer allocation has not yet been committed
	 * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
	 * correct replay of the inode allocation.
	 *
	 * For icreate item based inode allocation, the buffers aren't written
	 * to the journal during allocation, and hence we should always tag the
	 * buffer as an inode buffer so that the correct unlinked list replay
	 * occurs during recovery.
	 */
	if (bip->bli_flags & XFS_BLI_INODE_BUF) {
		if (xfs_sb_version_has_v3inode(&lip->li_mountp->m_sb) ||
		    !((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
		      xfs_log_item_in_current_chkpt(lip)))
			bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
		bip->bli_flags &= ~XFS_BLI_INODE_BUF;
	}

	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_format_segment(bip, lv, &vecp, offset,
					    &bip->bli_formats[i]);
		offset += BBTOB(bp->b_maps[i].bm_len);
	}

	/*
	 * Check to make sure everything is consistent.
	 */
	trace_xfs_buf_item_format(bip);
}

/*
 * This is called to pin the buffer associated with the buf log item in memory
 * so it cannot be written out.
 *
 * We also always take a reference to the buffer log item here so that the bli
 * is held while the item is pinned in memory. This means that we can
 * unconditionally drop the reference count a transaction holds when the
 * transaction is completed.
 */
STATIC void
xfs_buf_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_pin(bip);

	atomic_inc(&bip->bli_refcount);
	atomic_inc(&bip->bli_buf->b_pin_count);
}

/*
 * This is called to unpin the buffer associated with the buf log
 * item which was previously pinned with a call to xfs_buf_item_pin().
 *
 * Also drop the reference to the buf item for the current transaction.
 * If the XFS_BLI_STALE flag is set and we are the last reference,
 * then free up the buf log item and unlock the buffer.
 *
 * If the remove flag is set we are called from uncommit in the
 * forced-shutdown path.  If that is true and the reference count on
 * the log item is going to drop to zero we need to free the item's
 * descriptor in the transaction.
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	int			stale = bip->bli_flags & XFS_BLI_STALE;
	int			freed;

	ASSERT(bp->b_log_item == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	freed = atomic_dec_and_test(&bip->bli_refcount);

	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	if (freed && stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(bp->b_flags & XBF_STALE);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);

		trace_xfs_buf_item_unpin_stale(bip);

		if (remove) {
			/*
			 * If we are in a transaction context, we have to
			 * remove the log item from the transaction as we are
			 * about to release our reference to the buffer.  If we
			 * don't, the unlock that occurs later in
			 * xfs_trans_uncommit() will try to reference the
			 * buffer which we no longer have a hold on.
			 */
			if (!list_empty(&lip->li_trans))
				xfs_trans_del_item(lip);

			/*
			 * Since the transaction no longer refers to the buffer,
			 * the buffer should no longer refer to the transaction.
			 */
			bp->b_transp = NULL;
		}

		/*
		 * If we get called here because of an IO error, we may or may
		 * not have the item on the AIL.  xfs_trans_ail_delete() will
		 * take care of that situation.  xfs_trans_ail_delete() drops
		 * the AIL lock.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_item_done(bp);
			ASSERT(list_empty(&bp->b_li_list));
		} else {
			xfs_trans_ail_delete(lip, SHUTDOWN_LOG_IO_ERROR);
			xfs_buf_item_relse(bp);
			ASSERT(bp->b_log_item == NULL);
		}
		xfs_buf_relse(bp);
	} else if (freed && remove) {
		/*
		 * The buffer must be locked and held by the caller to simulate
		 * an async I/O failure.
		 */
		xfs_buf_lock(bp);
		xfs_buf_hold(bp);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_ioend_fail(bp);
	}
}

STATIC uint
xfs_buf_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			rval = XFS_ITEM_SUCCESS;

	if (xfs_buf_ispinned(bp))
		return XFS_ITEM_PINNED;
	if (!xfs_buf_trylock(bp)) {
		/*
		 * If we have just raced with a buffer being pinned and it has
		 * been marked stale, we could end up stalling until someone else
		 * issues a log force to unpin the stale buffer. Check for the
		 * race condition here so xfsaild recognizes the buffer is pinned
		 * and queues a log force to move it along.
		 */
		if (xfs_buf_ispinned(bp))
			return XFS_ITEM_PINNED;
		return XFS_ITEM_LOCKED;
	}

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_push(bip);

	/* Has a previous flush failed due to IO errors? */
	if (bp->b_flags & XBF_WRITE_FAIL) {
		xfs_buf_alert_ratelimited(bp, "XFS: Failing async write",
			"Failing async write on buffer block 0x%llx. Retrying async write.",
			(long long)bp->b_bn);
	}

	if (!xfs_buf_delwri_queue(bp, buffer_list))
		rval = XFS_ITEM_FLUSHING;
	xfs_buf_unlock(bp);
	return rval;
}

/*
 * Drop the buffer log item refcount and take appropriate action. This helper
 * determines whether the bli must be freed or not, since a decrement to zero
 * does not necessarily mean the bli is unused.
 *
 * Return true if the bli is freed, false otherwise.
 */
bool
xfs_buf_item_put(
	struct xfs_buf_log_item	*bip)
{
	struct xfs_log_item	*lip = &bip->bli_item;
	bool			aborted;
	bool			dirty;

	/* Drop the bli ref and return if it wasn't the last one. */
	if (!atomic_dec_and_test(&bip->bli_refcount))
		return false;

	/*
	 * We dropped the last ref and must free the item if clean or aborted.
	 * If the bli is dirty and non-aborted, the buffer was clean in the
	 * transaction but still awaiting writeback from previous changes. In
	 * that case, the bli is freed on buffer writeback completion.
	 */
	aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
		  XFS_FORCED_SHUTDOWN(lip->li_mountp);
	dirty = bip->bli_flags & XFS_BLI_DIRTY;
	if (dirty && !aborted)
		return false;

	/*
	 * The bli is aborted or clean. An aborted item may be in the AIL
	 * regardless of dirty state.  For example, consider an aborted
	 * transaction that invalidated a dirty bli and cleared the dirty
	 * state.
	 */
	if (aborted)
		xfs_trans_ail_delete(lip, 0);
	xfs_buf_item_relse(bip->bli_buf);
	return true;
}

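/*
 * Summary of the outcomes above (derived from this function, for reference):
 *
 *	refcount still > 0	-> false, bli remains in use
 *	dirty && !aborted	-> false, bli freed at writeback completion
 *	aborted			-> AIL delete, bli freed, return true
 *	clean			-> bli freed, return true
 */
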
/*
 * Release the buffer associated with the buf log item.  If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.
 *
 * This call ignores the recursion count.  It is only called when the buffer
 * should REALLY be unlocked, regardless of the recursion count.
 *
 * We unconditionally drop the transaction's reference to the log item. If the
 * item was logged, then another reference was taken when it was pinned, so we
 * can safely drop the transaction reference now.  This also allows us to avoid
 * potential races with the unpin code freeing the bli by not referencing the
 * bli after we've dropped the reference count.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 * if necessary but do not unlock the buffer.  This is for support of
 * xfs_trans_bhold(). Make sure the XFS_BLI_HOLD field is cleared if we don't
 * free the item.
 */
STATIC void
xfs_buf_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	bool			released;
	bool			hold = bip->bli_flags & XFS_BLI_HOLD;
	bool			stale = bip->bli_flags & XFS_BLI_STALE;
#if defined(DEBUG) || defined(XFS_WARN)
	bool			ordered = bip->bli_flags & XFS_BLI_ORDERED;
	bool			dirty = bip->bli_flags & XFS_BLI_DIRTY;
	bool			aborted = test_bit(XFS_LI_ABORTED,
						   &lip->li_flags);
#endif

	trace_xfs_buf_item_release(bip);

	/*
	 * The bli dirty state should match whether the blf has logged segments
	 * except for ordered buffers, where only the bli should be dirty.
	 */
	ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) ||
	       (ordered && dirty && !xfs_buf_item_dirty_format(bip)));
	ASSERT(!stale || (bip->__bli_format.blf_flags & XFS_BLF_CANCEL));

	/*
	 * Clear the buffer's association with this transaction and
	 * per-transaction state from the bli, which has been copied above.
	 */
	bp->b_transp = NULL;
	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);

	/*
	 * Unref the item and unlock the buffer unless held or stale. Stale
	 * buffers remain locked until final unpin unless the bli is freed by
	 * the unref call. The latter implies shutdown because buffer
	 * invalidation dirties the bli and transaction.
	 */
	released = xfs_buf_item_put(bip);
	if (hold || (stale && !released))
		return;
	ASSERT(!stale || aborted);
	xfs_buf_relse(bp);
}

STATIC void
xfs_buf_item_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		commit_lsn)
{
	return xfs_buf_item_release(lip);
}

/*
 * This is called to find out where the oldest active copy of the
 * buf log item in the on disk log resides now that the last log
 * write of it completed at the given lsn.
 * We always re-log all the dirty data in a buffer, so usually the
 * latest copy in the on disk log is the only one that matters.  For
 * those cases we simply return the given lsn.
 *
 * The one exception to this is for buffers full of newly allocated
 * inodes.  These buffers are only relogged with the XFS_BLI_INODE_BUF
 * flag set, indicating that only the di_next_unlinked fields from the
 * inodes in the buffers will be replayed during recovery.  If the
 * original newly allocated inode images have not yet been flushed
 * when the buffer is so relogged, then we need to make sure that we
 * keep the old images in the 'active' portion of the log.  We do this
 * by returning the original lsn of that transaction here rather than
 * the current one.
 */
STATIC xfs_lsn_t
xfs_buf_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	trace_xfs_buf_item_committed(bip);

	if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
		return lip->li_lsn;
	return lsn;
}

static const struct xfs_item_ops xfs_buf_item_ops = {
	.iop_size	= xfs_buf_item_size,
	.iop_format	= xfs_buf_item_format,
	.iop_pin	= xfs_buf_item_pin,
	.iop_unpin	= xfs_buf_item_unpin,
	.iop_release	= xfs_buf_item_release,
	.iop_committing	= xfs_buf_item_committing,
	.iop_committed	= xfs_buf_item_committed,
	.iop_push	= xfs_buf_item_push,
};

STATIC void
xfs_buf_item_get_format(
	struct xfs_buf_log_item	*bip,
	int			count)
{
	ASSERT(bip->bli_formats == NULL);
	bip->bli_format_count = count;

	if (count == 1) {
		bip->bli_formats = &bip->__bli_format;
		return;
	}

	bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
				       0);
}

STATIC void
xfs_buf_item_free_format(
	struct xfs_buf_log_item	*bip)
{
	if (bip->bli_formats != &bip->__bli_format) {
		kmem_free(bip->bli_formats);
		bip->bli_formats = NULL;
	}
}

/*
 * Allocate a new buf log item to go with the given buffer.
 * Set the buffer's b_log_item field to point to the new
 * buf log item.
 */
int
xfs_buf_item_init(
	struct xfs_buf	*bp,
	struct xfs_mount *mp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	int			chunks;
	int			map_size;
	int			i;

	/*
	 * Check to see if there is already a buf log item for
	 * this buffer. If we do already have one, there is
	 * nothing to do here so return.
	 */
	ASSERT(bp->b_mount == mp);
	if (bip) {
		ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
		ASSERT(!bp->b_transp);
		ASSERT(bip->bli_buf == bp);
		return 0;
	}

	bip = kmem_zone_zalloc(xfs_buf_item_zone, 0);
	xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
	bip->bli_buf = bp;

	/*
	 * chunks is the number of XFS_BLF_CHUNK size pieces the buffer
	 * can be divided into. Make sure not to truncate any pieces.
	 * map_size is the size of the bitmap needed to describe the
	 * chunks of the buffer.
	 *
	 * Discontiguous buffer support follows the layout of the underlying
	 * buffer. This makes the implementation as simple as possible.
	 */
	xfs_buf_item_get_format(bip, bp->b_map_count);

	for (i = 0; i < bip->bli_format_count; i++) {
		chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
				      XFS_BLF_CHUNK);
		map_size = DIV_ROUND_UP(chunks, NBWORD);

		if (map_size > XFS_BLF_DATAMAP_SIZE) {
			kmem_cache_free(xfs_buf_item_zone, bip);
			xfs_err(mp,
	"buffer item dirty bitmap (%u uints) too small to reflect %u bytes!",
					map_size,
					BBTOB(bp->b_maps[i].bm_len));
			return -EFSCORRUPTED;
		}

		bip->bli_formats[i].blf_type = XFS_LI_BUF;
		bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
		bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
		bip->bli_formats[i].blf_map_size = map_size;
	}

	bp->b_log_item = bip;
	xfs_buf_hold(bp);
	return 0;
}

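/*
 * Worked example (sizes assumed: 128-byte chunks, 32-bit bitmap words): a
 * single-map 4096-byte buffer gives chunks = DIV_ROUND_UP(4096, 128) = 32
 * and map_size = DIV_ROUND_UP(32, 32) = 1, so one bitmap word tracks the
 * whole segment while blf_len records the segment length in basic blocks.
 */
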
/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
static void
xfs_buf_item_log_segment(
	uint			first,
	uint			last,
	uint			*map)
{
	uint			first_bit;
	uint			last_bit;
	uint			bits_to_set;
	uint			bits_set;
	uint			word_num;
	uint			*wordp;
	uint			bit;
	uint			end_bit;
	uint			mask;

	ASSERT(first < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);
	ASSERT(last < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);

	/*
	 * Convert byte offsets to bit numbers.
	 */
	first_bit = first >> XFS_BLF_SHIFT;
	last_bit = last >> XFS_BLF_SHIFT;

	/*
	 * Calculate the total number of bits to be set.
	 */
	bits_to_set = last_bit - first_bit + 1;

	/*
	 * Get a pointer to the first word in the bitmap
	 * to set a bit in.
	 */
	word_num = first_bit >> BIT_TO_WORD_SHIFT;
	wordp = &map[word_num];

	/*
	 * Calculate the starting bit in the first word.
	 */
	bit = first_bit & (uint)(NBWORD - 1);

	/*
	 * First set any bits in the first word of our range.
	 * If it starts at bit 0 of the word, it will be
	 * set below rather than here.  That is what the variable
	 * bit tells us. The variable bits_set tracks the number
	 * of bits that have been set so far.  End_bit is the number
	 * of the last bit to be set in this word plus one.
	 */
	if (bit) {
		end_bit = min(bit + bits_to_set, (uint)NBWORD);
		mask = ((1U << (end_bit - bit)) - 1) << bit;
		*wordp |= mask;
		wordp++;
		bits_set = end_bit - bit;
	} else {
		bits_set = 0;
	}

	/*
	 * Now set bits a whole word at a time that are between
	 * first_bit and last_bit.
	 */
	while ((bits_to_set - bits_set) >= NBWORD) {
		*wordp = 0xffffffff;
		bits_set += NBWORD;
		wordp++;
	}

	/*
	 * Finally, set any bits left to be set in one last partial word.
	 */
	end_bit = bits_to_set - bits_set;
	if (end_bit) {
		mask = (1U << end_bit) - 1;
		*wordp |= mask;
	}
}

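/*
 * Worked example (assuming 128-byte chunks, i.e. XFS_BLF_SHIFT == 7, and
 * 32-bit words): logging bytes 0-255 gives first_bit = 0, last_bit = 1 and
 * bits_to_set = 2. Since bit == 0 the first-word step is skipped, the
 * whole-word loop does not run (2 < NBWORD), and the final partial word
 * sets mask = (1U << 2) - 1 = 0b11, marking chunks 0 and 1 dirty.
 */
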
/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
void
xfs_buf_item_log(
	struct xfs_buf_log_item	*bip,
	uint			first,
	uint			last)
{
	int			i;
	uint			start;
	uint			end;
	struct xfs_buf		*bp = bip->bli_buf;

	/*
	 * Walk each buffer segment and mark it dirty as appropriate.
	 */
	start = 0;
	for (i = 0; i < bip->bli_format_count; i++) {
		if (start > last)
			break;
		end = start + BBTOB(bp->b_maps[i].bm_len) - 1;

		/* Skip to the map that includes the first byte to log. */
		if (first > end) {
			start += BBTOB(bp->b_maps[i].bm_len);
			continue;
		}

		/*
		 * Trim the range to this segment and mark it in the bitmap.
		 * Note that we must convert buffer offsets to segment relative
		 * offsets (e.g., the first byte of each segment is byte 0 of
		 * that segment).
		 */
		if (first < start)
			first = start;
		if (end > last)
			end = last;
		xfs_buf_item_log_segment(first - start, end - start,
					 &bip->bli_formats[i].blf_data_map[0]);

		start += BBTOB(bp->b_maps[i].bm_len);
	}
}

/*
 * Return true if the buffer has any ranges logged/dirtied by a transaction,
 * false otherwise.
 */
bool
xfs_buf_item_dirty_format(
	struct xfs_buf_log_item	*bip)
{
	int			i;

	for (i = 0; i < bip->bli_format_count; i++) {
		if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
				      bip->bli_formats[i].blf_map_size))
			return true;
	}

	return false;
}

STATIC void
xfs_buf_item_free(
	struct xfs_buf_log_item	*bip)
{
	xfs_buf_item_free_format(bip);
	kmem_free(bip->bli_item.li_lv_shadow);
	kmem_cache_free(xfs_buf_item_zone, bip);
}

/*
 * xfs_buf_item_relse() is called when the buf log item is no longer needed.
 */
void
xfs_buf_item_relse(
	struct xfs_buf	*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	trace_xfs_buf_item_relse(bp, _RET_IP_);
	ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags));

	bp->b_log_item = NULL;
	xfs_buf_rele(bp);
	xfs_buf_item_free(bip);
}

/*
 * Invoke the error state callback for each log item affected by the failed I/O.
 *
 * If a metadata buffer write fails with a non-permanent error, the buffer is
 * eventually resubmitted and so the completion callbacks are not run. The error
 * state may need to be propagated to the log items attached to the buffer,
 * however, so the next AIL push of the item knows how to handle it correctly.
 */
STATIC void
xfs_buf_do_callbacks_fail(
	struct xfs_buf		*bp)
{
	struct xfs_ail		*ailp = bp->b_mount->m_ail;
	struct xfs_log_item	*lip;

	/*
	 * Buffer log item errors are handled directly by xfs_buf_item_push()
	 * and xfs_buf_iodone_callback_error, and they have no IO error
	 * callbacks. Check only for items in b_li_list.
	 */
	if (list_empty(&bp->b_li_list))
		return;

	spin_lock(&ailp->ail_lock);
	list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
		if (lip->li_ops->iop_error)
			lip->li_ops->iop_error(lip, bp);
	}
	spin_unlock(&ailp->ail_lock);
}

/*
 * Decide if we're going to retry the write after a failure, and prepare
 * the buffer for retrying the write.
 */
static bool
xfs_buf_ioerror_fail_without_retry(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	static unsigned long	lasttime;
	static struct xfs_buftarg *lasttarg;

	/*
	 * If we've already decided to shut down the filesystem because of
	 * I/O errors, there's no point in giving this a retry.
	 */
	if (XFS_FORCED_SHUTDOWN(mp))
		return true;

	if (bp->b_target != lasttarg ||
	    time_after(jiffies, (lasttime + 5*HZ))) {
		lasttime = jiffies;
		xfs_buf_ioerror_alert(bp, __this_address);
	}
	lasttarg = bp->b_target;

	/* Synchronous writes will have callers process the error. */
	if (!(bp->b_flags & XBF_ASYNC))
		return true;
	return false;
}

static bool
xfs_buf_ioerror_retry(
	struct xfs_buf		*bp,
	struct xfs_error_cfg	*cfg)
{
	if ((bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL)) &&
	    bp->b_last_error == bp->b_error)
		return false;

	bp->b_flags |= (XBF_WRITE | XBF_DONE | XBF_WRITE_FAIL);
	bp->b_last_error = bp->b_error;
	if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
	    !bp->b_first_retry_time)
		bp->b_first_retry_time = jiffies;
	return true;
}

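/*
 * Example sequence (illustrative): on the first async write failure
 * XBF_WRITE_FAIL is not yet set, so the function above records
 * b_last_error and b_first_retry_time and returns true - the write is
 * always resubmitted at least once. If the resubmitted write fails again
 * with the same errno, the (XBF_WRITE_FAIL && b_last_error == b_error)
 * test fires, false is returned and the caller falls through to the
 * permanent-failure checks below.
 */
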
/*
 * Account for this latest trip around the retry handler, and decide if
 * we've failed enough times to constitute a permanent failure.
 */
static bool
xfs_buf_ioerror_permanent(
	struct xfs_buf		*bp,
	struct xfs_error_cfg	*cfg)
{
	struct xfs_mount	*mp = bp->b_mount;

	if (cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
	    ++bp->b_retries > cfg->max_retries)
		return true;
	if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
	    time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time))
		return true;

	/* At unmount we may treat errors differently. */
	if ((mp->m_flags & XFS_MOUNT_UNMOUNTING) && mp->m_fail_unmount)
		return true;

	return false;
}

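/*
 * Note: max_retries and retry_timeout come from the per-error configuration
 * looked up via xfs_error_get_cfg(). These are administrator-tunable through
 * sysfs, e.g. /sys/fs/xfs/<dev>/error/metadata/<errno>/max_retries (path
 * given for illustration; see the XFS admin documentation for specifics).
 */
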
/*
 * On a sync write or shutdown we just want to stale the buffer and let the
 * caller handle the error in bp->b_error appropriately.
 *
 * If the write was asynchronous then no one will be looking for the error. If
 * this is the first failure of this type, clear the error state and write the
 * buffer out again. This means we always retry an async write failure at least
 * once, but we also need to set the buffer up to behave correctly now for
 * repeated failures.
 *
 * If we get repeated async write failures, then we take action according to
 * the error configuration we have been set up to use.
 *
 * Multi-state return value:
 *
 * XBF_IOERROR_FINISH: clear IO error retry state and run callback completions
 * XBF_IOERROR_DONE: resubmitted immediately, do not run any completions
 * XBF_IOERROR_FAIL: transient error, run failure callback completions and then
 * release the buffer
 */
enum {
	XBF_IOERROR_FINISH,
	XBF_IOERROR_DONE,
	XBF_IOERROR_FAIL,
};

static int
xfs_buf_iodone_error(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_error_cfg	*cfg;

	if (xfs_buf_ioerror_fail_without_retry(bp))
		goto out_stale;

	trace_xfs_buf_item_iodone_async(bp, _RET_IP_);

	cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);
	if (xfs_buf_ioerror_retry(bp, cfg)) {
		xfs_buf_ioerror(bp, 0);
		xfs_buf_submit(bp);
		return XBF_IOERROR_DONE;
	}

	/*
	 * Permanent error - we need to trigger a shutdown if we haven't
	 * already to indicate that inconsistency will result from this action.
	 */
	if (xfs_buf_ioerror_permanent(bp, cfg)) {
		xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
		goto out_stale;
	}

	/* Still considered a transient error. Caller will schedule retries. */
	return XBF_IOERROR_FAIL;

out_stale:
	xfs_buf_stale(bp);
	bp->b_flags |= XBF_DONE;
	trace_xfs_buf_error_relse(bp, _RET_IP_);
	return XBF_IOERROR_FINISH;
}

static void
xfs_buf_item_done(
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	if (!bip)
		return;

	/*
	 * If we are forcibly shutting down, this may well be off the AIL
	 * already. That's because we simulate the log-committed callbacks to
	 * unpin these buffers. Or we may never have put this item on the AIL
	 * because the transaction was aborted forcibly.
	 * xfs_trans_ail_delete() takes care of these.
	 *
	 * Either way, the AIL is useless if we're forcing a shutdown.
	 */
	xfs_trans_ail_delete(&bip->bli_item, SHUTDOWN_CORRUPT_INCORE);
	bp->b_log_item = NULL;
	xfs_buf_item_free(bip);
	xfs_buf_rele(bp);
}

static inline void
xfs_buf_clear_ioerror_retry_state(
	struct xfs_buf		*bp)
{
	bp->b_last_error = 0;
	bp->b_retries = 0;
	bp->b_first_retry_time = 0;
}

/*
 * Inode buffer iodone callback function.
 */
void
xfs_buf_inode_iodone(
	struct xfs_buf		*bp)
{
	if (bp->b_error) {
		int ret = xfs_buf_iodone_error(bp);

		if (ret == XBF_IOERROR_FINISH)
			goto finish_iodone;
		if (ret == XBF_IOERROR_DONE)
			return;
		ASSERT(ret == XBF_IOERROR_FAIL);
		xfs_buf_do_callbacks_fail(bp);
		xfs_buf_ioerror(bp, 0);
		xfs_buf_relse(bp);
		return;
	}

finish_iodone:
	xfs_buf_clear_ioerror_retry_state(bp);
	xfs_buf_item_done(bp);
	xfs_iflush_done(bp);
	xfs_buf_ioend_finish(bp);
}

/*
 * Dquot buffer iodone callback function.
 */
void
xfs_buf_dquot_iodone(
	struct xfs_buf		*bp)
{
	if (bp->b_error) {
		int ret = xfs_buf_iodone_error(bp);

		if (ret == XBF_IOERROR_FINISH)
			goto finish_iodone;
		if (ret == XBF_IOERROR_DONE)
			return;
		ASSERT(ret == XBF_IOERROR_FAIL);
		xfs_buf_do_callbacks_fail(bp);
		xfs_buf_ioerror(bp, 0);
		xfs_buf_relse(bp);
		return;
	}

finish_iodone:
	xfs_buf_clear_ioerror_retry_state(bp);
	/* A newly allocated dquot buffer might have a log item attached. */
	xfs_buf_item_done(bp);
	xfs_dquot_done(bp);
	xfs_buf_ioend_finish(bp);
}

/*
 * Dirty buffer iodone callback function.
 *
 * Note that for things like remote attribute buffers, there may not be a
 * buffer log item here, so processing the buffer log item must remain
 * optional.
 */
void
xfs_buf_iodone(
	struct xfs_buf		*bp)
{
	if (bp->b_error) {
		int ret = xfs_buf_iodone_error(bp);

		if (ret == XBF_IOERROR_FINISH)
			goto finish_iodone;
		if (ret == XBF_IOERROR_DONE)
			return;
		ASSERT(ret == XBF_IOERROR_FAIL);
		xfs_buf_do_callbacks_fail(bp);
		xfs_buf_ioerror(bp, 0);
		xfs_buf_relse(bp);
		return;
	}

finish_iodone:
	xfs_buf_clear_ioerror_retry_state(bp);
	xfs_buf_item_done(bp);
	xfs_buf_ioend_finish(bp);
}