// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_trace.h"
#include "xfs_log.h"


kmem_zone_t	*xfs_buf_item_zone;

static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_buf_log_item, bli_item);
}

STATIC void	xfs_buf_do_callbacks(struct xfs_buf *bp);
/* Is this log iovec plausibly large enough to contain the buffer log format? */
bool
xfs_buf_log_check_iovec(
	struct xfs_log_iovec		*iovec)
{
	struct xfs_buf_log_format	*blfp = iovec->i_addr;
	char				*bmp_end;
	char				*item_end;

	if (offsetof(struct xfs_buf_log_format, blf_data_map) > iovec->i_len)
		return false;

	item_end = (char *)iovec->i_addr + iovec->i_len;
	bmp_end = (char *)&blfp->blf_data_map[blfp->blf_map_size];
	return bmp_end <= item_end;
}
static inline int
xfs_buf_log_format_size(
	struct xfs_buf_log_format *blfp)
{
	return offsetof(struct xfs_buf_log_format, blf_data_map) +
			(blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
}
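
/*
 * Worked example (editor's illustration, not part of the original source):
 * the dirty bitmap sits after the fixed xfs_buf_log_format header fields, so
 * a format with blf_map_size == 2 consumes
 * offsetof(struct xfs_buf_log_format, blf_data_map) + 2 * sizeof(unsigned int)
 * bytes of log space - only the bitmap words actually used are logged, not
 * the whole in-memory XFS_BLF_DATAMAP_SIZE array.
 */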
/*
 * This returns the number of log iovecs needed to log the
 * given buf log item segment.
 *
 * It calculates this as 1 iovec for the buf log format structure
 * and 1 for each stretch of non-contiguous chunks to be logged.
 * Contiguous chunks are logged in a single iovec.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing.
 */
STATIC void
xfs_buf_item_size_segment(
	struct xfs_buf_log_item		*bip,
	struct xfs_buf_log_format	*blfp,
	int				*nvecs,
	int				*nbytes)
{
	struct xfs_buf			*bp = bip->bli_buf;
	int				next_bit;
	int				last_bit;

	last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (last_bit == -1)
		return;

	/*
	 * initial count for a dirty buffer is 2 vectors - the format structure
	 * and the first dirty region.
	 */
	*nvecs += 2;
	*nbytes += xfs_buf_log_format_size(blfp) + XFS_BLF_CHUNK;

	while (last_bit != -1) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					last_bit + 1);
		/*
		 * If we run out of bits, leave the loop,
		 * else if we find a new set of bits bump the number of vecs,
		 * else keep scanning the current set of bits.
		 */
		if (next_bit == -1) {
			break;
		} else if (next_bit != last_bit + 1) {
			last_bit = next_bit;
			(*nvecs)++;
		} else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
			   (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
			    XFS_BLF_CHUNK)) {
			last_bit = next_bit;
			(*nvecs)++;
		} else {
			last_bit++;
		}
		*nbytes += XFS_BLF_CHUNK;
	}
}
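
/*
 * Worked example (editor's illustration, not part of the original source):
 * for a dirty bitmap with bits {0, 1, 4, 5} set, the walk above starts with
 * *nvecs += 2 (format structure + first dirty region), then bumps *nvecs
 * once more at the 1 -> 4 discontinuity, for 3 iovecs total. *nbytes
 * accumulates the format size plus 4 * XFS_BLF_CHUNK, one chunk per set bit.
 */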
/*
 * This returns the number of log iovecs needed to log the given buf log item.
 *
 * It calculates this as 1 iovec for the buf log format structure and 1 for each
 * stretch of non-contiguous chunks to be logged.  Contiguous chunks are logged
 * in a single iovec.
 *
 * Discontiguous buffers need a format structure per region that is being
 * logged. This makes the changes in the buffer appear to log recovery as
 * though they came from separate buffers, just like would occur if multiple
 * buffers were used instead of a single discontiguous buffer. This enables
 * discontiguous buffers to be in-memory constructs, completely transparent to
 * what ends up on disk.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
 * format structures.
 */
STATIC void
xfs_buf_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_size_stale(bip);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		*nvecs += bip->bli_format_count;
		for (i = 0; i < bip->bli_format_count; i++) {
			*nbytes += xfs_buf_log_format_size(&bip->bli_formats[i]);
		}
		return;
	}

	ASSERT(bip->bli_flags & XFS_BLI_LOGGED);

	if (bip->bli_flags & XFS_BLI_ORDERED) {
		/*
		 * The buffer has been logged just to order it.
		 * It is not being included in the transaction
		 * commit, so no vectors are used at all.
		 */
		trace_xfs_buf_item_size_ordered(bip);
		*nvecs = XFS_LOG_VEC_ORDERED;
		return;
	}

	/*
	 * The vector count is based on the number of buffer vectors we have
	 * dirty bits in. This will only be greater than one when we have a
	 * compound buffer with more than one segment dirty. Hence for compound
	 * buffers we need to track which segment the dirty bits correspond to,
	 * and when we move from one segment to the next increment the vector
	 * count for the extra buf log format structure that will need to be
	 * written.
	 */
	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_size_segment(bip, &bip->bli_formats[i],
					  nvecs, nbytes);
	}
	trace_xfs_buf_item_size(bip);
}
static inline void
xfs_buf_item_copy_iovec(
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	struct xfs_buf		*bp,
	uint			offset,
	int			first_bit,
	uint			nbits)
{
	offset += first_bit * XFS_BLF_CHUNK;
	xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BCHUNK,
			xfs_buf_offset(bp, offset),
			nbits * XFS_BLF_CHUNK);
}
static inline bool
xfs_buf_item_straddle(
	struct xfs_buf		*bp,
	uint			offset,
	int			next_bit,
	int			last_bit)
{
	return xfs_buf_offset(bp, offset + (next_bit << XFS_BLF_SHIFT)) !=
		(xfs_buf_offset(bp, offset + (last_bit << XFS_BLF_SHIFT)) +
		 XFS_BLF_CHUNK);
}
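
/*
 * Editor's note (not part of the original source): adjacent dirty chunks can
 * only be copied with a single iovec if they are also adjacent in memory.
 * Buffers backed by multiple discontiguous pages can break that assumption,
 * which is why the straddle check above compares the kernel addresses
 * returned by xfs_buf_offset() rather than just the chunk numbers.
 */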
static void
xfs_buf_item_format_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	uint			offset,
	struct xfs_buf_log_format *blfp)
{
	struct xfs_buf		*bp = bip->bli_buf;
	uint			base_size;
	int			first_bit;
	int			last_bit;
	int			next_bit;
	uint			nbits;

	/* copy the flags across from the base format item */
	blfp->blf_flags = bip->__bli_format.blf_flags;

	/*
	 * Base size is the actual size of the ondisk structure - it reflects
	 * the actual size of the dirty bitmap rather than the size of the in
	 * memory structure.
	 */
	base_size = xfs_buf_log_format_size(blfp);

	first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
		/*
		 * If the map is not dirty in the transaction, mark
		 * the size as zero and do not advance the vector pointer.
		 */
		return;
	}

	blfp = xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BFORMAT, blfp, base_size);
	blfp->blf_size = 1;

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_format_stale(bip);
		ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
		return;
	}

	/*
	 * Fill in an iovec for each set of contiguous chunks.
	 */
	last_bit = first_bit;
	nbits = 1;
	for (;;) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					(uint)last_bit + 1);
		/*
		 * If we run out of bits fill in the last iovec and get out of
		 * the loop.  Else if we start a new set of bits then fill in
		 * the iovec for the series we were looking at and start
		 * counting the bits in the new one.  Else we're still in the
		 * same set of bits so just keep counting and scanning.
		 */
		if (next_bit == -1) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			break;
		} else if (next_bit != last_bit + 1 ||
			   xfs_buf_item_straddle(bp, offset, next_bit, last_bit)) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else {
			last_bit++;
			nbits++;
		}
	}
}
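
/*
 * Worked example (editor's illustration, not part of the original source):
 * continuing the {0, 1, 4, 5} bitmap from above, the segment is emitted as
 * three iovecs: the XLOG_REG_TYPE_BFORMAT header, one XLOG_REG_TYPE_BCHUNK
 * iovec covering chunks 0-1 and another covering chunks 4-5, leaving
 * blf_size == 3.
 */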
/*
 * This is called to fill in the vector of log iovecs for the
 * given log buf item.  It fills the first entry with a buf log
 * format structure, and the rest point to contiguous chunks
 * within the buffer.
 */
STATIC void
xfs_buf_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	struct xfs_log_iovec	*vecp = NULL;
	uint			offset = 0;
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));
	ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
	       (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF &&
	        xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
	ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	/*
	 * If it is an inode buffer, transfer the in-memory state to the
	 * format flags and clear the in-memory state.
	 *
	 * For buffer based inode allocation, we do not transfer
	 * this state if the inode buffer allocation has not yet been committed
	 * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
	 * correct replay of the inode allocation.
	 *
	 * For icreate item based inode allocation, the buffers aren't written
	 * to the journal during allocation, and hence we should always tag the
	 * buffer as an inode buffer so that the correct unlinked list replay
	 * occurs during recovery.
	 */
	if (bip->bli_flags & XFS_BLI_INODE_BUF) {
		if (xfs_sb_version_has_v3inode(&lip->li_mountp->m_sb) ||
		    !((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
		      xfs_log_item_in_current_chkpt(lip)))
			bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
		bip->bli_flags &= ~XFS_BLI_INODE_BUF;
	}

	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_format_segment(bip, lv, &vecp, offset,
					    &bip->bli_formats[i]);
		offset += BBTOB(bp->b_maps[i].bm_len);
	}

	/*
	 * Check to make sure everything is consistent.
	 */
	trace_xfs_buf_item_format(bip);
}
/*
 * This is called to pin the buffer associated with the buf log item in memory
 * so it cannot be written out.
 *
 * We also always take a reference to the buffer log item here so that the bli
 * is held while the item is pinned in memory. This means that we can
 * unconditionally drop the reference count a transaction holds when the
 * transaction is completed.
 */
STATIC void
xfs_buf_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_pin(bip);

	atomic_inc(&bip->bli_refcount);
	atomic_inc(&bip->bli_buf->b_pin_count);
}
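
/*
 * Editor's note (not part of the original source): a sketch of the bli
 * refcount pairing described above - joining a buffer to a transaction takes
 * a reference that xfs_buf_item_release() drops at commit/cancel time, and
 * xfs_buf_item_pin() takes a second reference that xfs_buf_item_unpin()
 * drops once the log no longer needs the item in memory.
 */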
/*
 * This is called to unpin the buffer associated with the buf log
 * item which was previously pinned with a call to xfs_buf_item_pin().
 *
 * Also drop the reference to the buf item for the current transaction.
 * If the XFS_BLI_STALE flag is set and we are the last reference,
 * then free up the buf log item and unlock the buffer.
 *
 * If the remove flag is set we are called from uncommit in the
 * forced-shutdown path.  If that is true and the reference count on
 * the log item is going to drop to zero we need to free the item's
 * descriptor in the transaction.
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	struct xfs_ail		*ailp = lip->li_ailp;
	int			stale = bip->bli_flags & XFS_BLI_STALE;
	int			freed;

	ASSERT(bp->b_log_item == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	freed = atomic_dec_and_test(&bip->bli_refcount);

	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	if (freed && stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(bp->b_flags & XBF_STALE);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);

		trace_xfs_buf_item_unpin_stale(bip);

		if (remove) {
			/*
			 * If we are in a transaction context, we have to
			 * remove the log item from the transaction as we are
			 * about to release our reference to the buffer.  If we
			 * don't, the unlock that occurs later in
			 * xfs_trans_uncommit() will try to reference the
			 * buffer which we no longer have a hold on.
			 */
			if (!list_empty(&lip->li_trans))
				xfs_trans_del_item(lip);

			/*
			 * Since the transaction no longer refers to the buffer,
			 * the buffer should no longer refer to the transaction.
			 */
			bp->b_transp = NULL;
		}

		/*
		 * If we get called here because of an IO error, we may
		 * or may not have the item on the AIL. xfs_trans_ail_delete()
		 * will take care of that situation.
		 * xfs_trans_ail_delete() drops the AIL lock.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_do_callbacks(bp);
			bp->b_log_item = NULL;
			list_del_init(&bp->b_li_list);
			bp->b_iodone = NULL;
		} else {
			spin_lock(&ailp->ail_lock);
			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
			xfs_buf_item_relse(bp);
			ASSERT(bp->b_log_item == NULL);
		}
		xfs_buf_relse(bp);
	} else if (freed && remove) {
		/*
		 * The buffer must be locked and held by the caller to simulate
		 * an async I/O failure.
		 */
		xfs_buf_lock(bp);
		xfs_buf_hold(bp);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_ioend_fail(bp);
	}
}
STATIC uint
xfs_buf_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			rval = XFS_ITEM_SUCCESS;

	if (xfs_buf_ispinned(bp))
		return XFS_ITEM_PINNED;
	if (!xfs_buf_trylock(bp)) {
		/*
		 * If we have just raced with a buffer being pinned and it has
		 * been marked stale, we could end up stalling until someone
		 * else issues a log force to unpin the stale buffer. Check
		 * for the race condition here so xfsaild recognizes the
		 * buffer is pinned and queues a log force to move it along.
		 */
		if (xfs_buf_ispinned(bp))
			return XFS_ITEM_PINNED;
		return XFS_ITEM_LOCKED;
	}

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_push(bip);

	/* has a previous flush failed due to IO errors? */
	if (bp->b_flags & XBF_WRITE_FAIL) {
		xfs_buf_alert_ratelimited(bp, "XFS: Failing async write",
	    "Failing async write on buffer block 0x%llx. Retrying async write.",
					  (long long)bp->b_bn);
	}

	if (!xfs_buf_delwri_queue(bp, buffer_list))
		rval = XFS_ITEM_FLUSHING;
	xfs_buf_unlock(bp);
	return rval;
}
/*
 * Drop the buffer log item refcount and take appropriate action. This helper
 * determines whether the bli must be freed or not, since a decrement to zero
 * does not necessarily mean the bli is unused.
 *
 * Return true if the bli is freed, false otherwise.
 */
bool
xfs_buf_item_put(
	struct xfs_buf_log_item	*bip)
{
	struct xfs_log_item	*lip = &bip->bli_item;
	bool			aborted;
	bool			dirty;

	/* drop the bli ref and return if it wasn't the last one */
	if (!atomic_dec_and_test(&bip->bli_refcount))
		return false;

	/*
	 * We dropped the last ref and must free the item if clean or aborted.
	 * If the bli is dirty and non-aborted, the buffer was clean in the
	 * transaction but still awaiting writeback from previous changes. In
	 * that case, the bli is freed on buffer writeback completion.
	 */
	aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
		  XFS_FORCED_SHUTDOWN(lip->li_mountp);
	dirty = bip->bli_flags & XFS_BLI_DIRTY;
	if (dirty && !aborted)
		return false;

	/*
	 * The bli is aborted or clean. An aborted item may be in the AIL
	 * regardless of dirty state.  For example, consider an aborted
	 * transaction that invalidated a dirty bli and cleared the dirty
	 * state.
	 */
	if (aborted)
		xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
	xfs_buf_item_relse(bip->bli_buf);
	return true;
}
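
/*
 * Editor's summary (not part of the original source) of the final-reference
 * cases handled above:
 *
 *	dirty && !aborted -> keep the bli; writeback completion frees it
 *	aborted           -> pull the item off the AIL, then free the bli
 *	clean && !aborted -> free the bli immediately
 */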
/*
 * Release the buffer associated with the buf log item.  If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.
 *
 * This call ignores the recursion count.  It is only called when the buffer
 * should REALLY be unlocked, regardless of the recursion count.
 *
 * We unconditionally drop the transaction's reference to the log item. If the
 * item was logged, then another reference was taken when it was pinned, so we
 * can safely drop the transaction reference now.  This also allows us to avoid
 * potential races with the unpin code freeing the bli by not referencing the
 * bli after we've dropped the reference count.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 * if necessary but do not unlock the buffer.  This is for support of
 * xfs_trans_bhold(). Make sure the XFS_BLI_HOLD field is cleared if we don't
 * free the item.
 */
STATIC void
xfs_buf_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	bool			released;
	bool			hold = bip->bli_flags & XFS_BLI_HOLD;
	bool			stale = bip->bli_flags & XFS_BLI_STALE;
#if defined(DEBUG) || defined(XFS_WARN)
	bool			ordered = bip->bli_flags & XFS_BLI_ORDERED;
	bool			dirty = bip->bli_flags & XFS_BLI_DIRTY;
	bool			aborted = test_bit(XFS_LI_ABORTED,
						   &lip->li_flags);
#endif

	trace_xfs_buf_item_release(bip);

	/*
	 * The bli dirty state should match whether the blf has logged segments
	 * except for ordered buffers, where only the bli should be dirty.
	 */
	ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) ||
	       (ordered && dirty && !xfs_buf_item_dirty_format(bip)));
	ASSERT(!stale || (bip->__bli_format.blf_flags & XFS_BLF_CANCEL));

	/*
	 * Clear the buffer's association with this transaction and
	 * per-transaction state from the bli, which has been copied above.
	 */
	bp->b_transp = NULL;
	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);

	/*
	 * Unref the item and unlock the buffer unless held or stale. Stale
	 * buffers remain locked until final unpin unless the bli is freed by
	 * the unref call. The latter implies shutdown because buffer
	 * invalidation dirties the bli and transaction.
	 */
	released = xfs_buf_item_put(bip);
	if (hold || (stale && !released))
		return;
	ASSERT(!stale || aborted);
	xfs_buf_relse(bp);
}
STATIC void
xfs_buf_item_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		commit_lsn)
{
	return xfs_buf_item_release(lip);
}
/*
 * This is called to find out where the oldest active copy of the
 * buf log item in the on disk log resides now that the last log
 * write of it completed at the given lsn.
 * We always re-log all the dirty data in a buffer, so usually the
 * latest copy in the on disk log is the only one that matters.  For
 * those cases we simply return the given lsn.
 *
 * The one exception to this is for buffers full of newly allocated
 * inodes.  These buffers are only relogged with the XFS_BLI_INODE_BUF
 * flag set, indicating that only the di_next_unlinked fields from the
 * inodes in the buffers will be replayed during recovery.  If the
 * original newly allocated inode images have not yet been flushed
 * when the buffer is so relogged, then we need to make sure that we
 * keep the old images in the 'active' portion of the log.  We do this
 * by returning the original lsn of that transaction here rather than
 * the current one.
 */
STATIC xfs_lsn_t
xfs_buf_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	trace_xfs_buf_item_committed(bip);

	if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
		return lip->li_lsn;
	return lsn;
}
static const struct xfs_item_ops xfs_buf_item_ops = {
	.iop_size	= xfs_buf_item_size,
	.iop_format	= xfs_buf_item_format,
	.iop_pin	= xfs_buf_item_pin,
	.iop_unpin	= xfs_buf_item_unpin,
	.iop_release	= xfs_buf_item_release,
	.iop_committing	= xfs_buf_item_committing,
	.iop_committed	= xfs_buf_item_committed,
	.iop_push	= xfs_buf_item_push,
};
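
/*
 * Editor's note (not part of the original source): these callbacks are the
 * interface the generic log item code uses. Roughly, ->iop_size/->iop_format
 * run at CIL commit time, ->iop_pin/->iop_unpin bracket the item's life in
 * the in-memory log, ->iop_push is driven by xfsaild writeback, and
 * ->iop_release/->iop_committing run as a transaction lets go of the item.
 */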
STATIC void
xfs_buf_item_get_format(
	struct xfs_buf_log_item	*bip,
	int			count)
{
	ASSERT(bip->bli_formats == NULL);
	bip->bli_format_count = count;

	if (count == 1) {
		bip->bli_formats = &bip->__bli_format;
		return;
	}

	bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
				       0);
}
STATIC void
xfs_buf_item_free_format(
	struct xfs_buf_log_item	*bip)
{
	if (bip->bli_formats != &bip->__bli_format) {
		kmem_free(bip->bli_formats);
		bip->bli_formats = NULL;
	}
}
/*
 * Allocate a new buf log item to go with the given buffer.
 * Set the buffer's b_log_item field to point to the new
 * buf log item.
 */
int
xfs_buf_item_init(
	struct xfs_buf		*bp,
	struct xfs_mount	*mp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	int			chunks;
	int			map_size;
	int			i;

	/*
	 * Check to see if there is already a buf log item for
	 * this buffer. If we do already have one, there is
	 * nothing to do here so return.
	 */
	ASSERT(bp->b_mount == mp);
	if (bip) {
		ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
		ASSERT(!bp->b_transp);
		ASSERT(bip->bli_buf == bp);
		return 0;
	}

	bip = kmem_zone_zalloc(xfs_buf_item_zone, 0);
	xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
	bip->bli_buf = bp;

	/*
	 * chunks is the number of XFS_BLF_CHUNK size pieces the buffer
	 * can be divided into. Make sure not to truncate any pieces.
	 * map_size is the size of the bitmap needed to describe the
	 * chunks of the buffer.
	 *
	 * Discontiguous buffer support follows the layout of the underlying
	 * buffer. This makes the implementation as simple as possible.
	 */
	xfs_buf_item_get_format(bip, bp->b_map_count);

	for (i = 0; i < bip->bli_format_count; i++) {
		chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
				      XFS_BLF_CHUNK);
		map_size = DIV_ROUND_UP(chunks, NBWORD);

		if (map_size > XFS_BLF_DATAMAP_SIZE) {
			kmem_cache_free(xfs_buf_item_zone, bip);
			xfs_err(mp,
	"buffer item dirty bitmap (%u uints) too small to reflect %u bytes!",
					map_size,
					BBTOB(bp->b_maps[i].bm_len));
			return -EFSCORRUPTED;
		}

		bip->bli_formats[i].blf_type = XFS_LI_BUF;
		bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
		bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
		bip->bli_formats[i].blf_map_size = map_size;
	}

	bp->b_log_item = bip;
	xfs_buf_hold(bp);
	return 0;
}
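
/*
 * Worked example (editor's illustration, not part of the original source):
 * a single-map 4096 byte buffer gives chunks = DIV_ROUND_UP(4096, 128) = 32
 * and, with 32-bit map words, map_size = DIV_ROUND_UP(32, 32) = 1, so a
 * single bitmap word tracks the whole buffer at 128 byte granularity.
 */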
/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
static void
xfs_buf_item_log_segment(
	uint			first,
	uint			last,
	uint			*map)
{
	uint			first_bit;
	uint			last_bit;
	uint			bits_to_set;
	uint			bits_set;
	uint			word_num;
	uint			*wordp;
	uint			bit;
	uint			end_bit;
	uint			mask;

	ASSERT(first < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);
	ASSERT(last < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);

	/*
	 * Convert byte offsets to bit numbers.
	 */
	first_bit = first >> XFS_BLF_SHIFT;
	last_bit = last >> XFS_BLF_SHIFT;

	/*
	 * Calculate the total number of bits to be set.
	 */
	bits_to_set = last_bit - first_bit + 1;

	/*
	 * Get a pointer to the first word in the bitmap
	 * to set a bit in.
	 */
	word_num = first_bit >> BIT_TO_WORD_SHIFT;
	wordp = &map[word_num];

	/*
	 * Calculate the starting bit in the first word.
	 */
	bit = first_bit & (uint)(NBWORD - 1);

	/*
	 * First set any bits in the first word of our range.
	 * If it starts at bit 0 of the word, it will be
	 * set below rather than here.  That is what the variable
	 * bit tells us. The variable bits_set tracks the number
	 * of bits that have been set so far.  End_bit is the number
	 * of the last bit to be set in this word plus one.
	 */
	if (bit) {
		end_bit = min(bit + bits_to_set, (uint)NBWORD);
		mask = ((1U << (end_bit - bit)) - 1) << bit;
		*wordp |= mask;
		wordp++;
		bits_set = end_bit - bit;
	} else {
		bits_set = 0;
	}

	/*
	 * Now set bits a whole word at a time that are between
	 * first_bit and last_bit.
	 */
	while ((bits_to_set - bits_set) >= NBWORD) {
		*wordp = 0xffffffff;
		bits_set += NBWORD;
		wordp++;
	}

	/*
	 * Finally, set any bits left to be set in one last partial word.
	 */
	end_bit = bits_to_set - bits_set;
	if (end_bit) {
		mask = (1U << end_bit) - 1;
		*wordp |= mask;
	}
}
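
/*
 * Worked example (editor's illustration, not part of the original source):
 * logging bytes 4224-4479 of a segment converts to first_bit = 33 and
 * last_bit = 34 (128 byte chunks), so bits_to_set = 2. That lands in map[1]
 * starting at bit 1, giving mask = ((1U << 2) - 1) << 1 = 0x6, and both the
 * whole-word loop and the trailing partial word then have nothing to do.
 */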
/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
void
xfs_buf_item_log(
	struct xfs_buf_log_item	*bip,
	uint			first,
	uint			last)
{
	int			i;
	uint			start;
	uint			end;
	struct xfs_buf		*bp = bip->bli_buf;

	/*
	 * walk each buffer segment and mark them dirty appropriately.
	 */
	start = 0;
	for (i = 0; i < bip->bli_format_count; i++) {
		if (start > last)
			break;
		end = start + BBTOB(bp->b_maps[i].bm_len) - 1;

		/* skip to the map that includes the first byte to log */
		if (first > end) {
			start += BBTOB(bp->b_maps[i].bm_len);
			continue;
		}

		/*
		 * Trim the range to this segment and mark it in the bitmap.
		 * Note that we must convert buffer offsets to segment relative
		 * offsets (e.g., the first byte of each segment is byte 0 of
		 * that segment).
		 */
		if (first < start)
			first = start;
		if (end > last)
			end = last;
		xfs_buf_item_log_segment(first - start, end - start,
					 &bip->bli_formats[i].blf_data_map[0]);

		start += BBTOB(bp->b_maps[i].bm_len);
	}
}
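
/*
 * Worked example (editor's illustration, not part of the original source):
 * for a two-map compound buffer with 4096 bytes per segment, logging buffer
 * range 4000-4200 marks bytes 4000-4095 in segment 0 and, after converting
 * to segment-relative offsets, bytes 0-104 in segment 1's bitmap.
 */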
/*
 * Return true if the buffer has any ranges logged/dirtied by a transaction,
 * false otherwise.
 */
bool
xfs_buf_item_dirty_format(
	struct xfs_buf_log_item	*bip)
{
	int			i;

	for (i = 0; i < bip->bli_format_count; i++) {
		if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
				      bip->bli_formats[i].blf_map_size))
			return true;
	}

	return false;
}
STATIC void
xfs_buf_item_free(
	struct xfs_buf_log_item	*bip)
{
	xfs_buf_item_free_format(bip);
	kmem_free(bip->bli_item.li_lv_shadow);
	kmem_cache_free(xfs_buf_item_zone, bip);
}
/*
 * This is called when the buf log item is no longer needed.  It should
 * free the buf log item associated with the given buffer and clear
 * the buffer's pointer to the buf log item.  If there are no more
 * items in the list, clear the b_iodone field of the buffer (see
 * xfs_buf_attach_iodone() below).
 */
void
xfs_buf_item_relse(
	struct xfs_buf	*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	trace_xfs_buf_item_relse(bp, _RET_IP_);
	ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags));

	bp->b_log_item = NULL;
	if (list_empty(&bp->b_li_list))
		bp->b_iodone = NULL;

	xfs_buf_rele(bp);
	xfs_buf_item_free(bip);
}
/*
 * Add the given log item with its callback to the list of callbacks
 * to be called when the buffer's I/O completes.  If it is not set
 * already, set the buffer's b_iodone() routine to be
 * xfs_buf_iodone_callbacks() and link the log item into the list of
 * items rooted at b_li_list.
 */
void
xfs_buf_attach_iodone(
	struct xfs_buf		*bp,
	void			(*cb)(struct xfs_buf *, struct xfs_log_item *),
	struct xfs_log_item	*lip)
{
	ASSERT(xfs_buf_islocked(bp));

	lip->li_cb = cb;
	list_add_tail(&lip->li_bio_list, &bp->b_li_list);

	ASSERT(bp->b_iodone == NULL ||
	       bp->b_iodone == xfs_buf_iodone_callbacks);
	bp->b_iodone = xfs_buf_iodone_callbacks;
}
/*
 * We can have many callbacks on a buffer.  Running the callbacks individually
 * can cause a lot of contention on the AIL lock, so we allow for a single
 * callback to be able to scan the remaining items in bp->b_li_list for other
 * items of the same type and callback to be processed in the first call.
 *
 * As a result, the loop walking the callback list below will also modify the
 * list. It removes the first item from the list and then runs the callback.
 * The loop then restarts from the new first item in the list. This allows the
 * callback to scan and modify the list attached to the buffer and we don't
 * have to care about maintaining a next item pointer.
 */
STATIC void
xfs_buf_do_callbacks(
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item *blip = bp->b_log_item;
	struct xfs_log_item	*lip;

	/* If there is a buf_log_item attached, run its callback */
	if (blip) {
		lip = &blip->bli_item;
		lip->li_cb(bp, lip);
	}

	while (!list_empty(&bp->b_li_list)) {
		lip = list_first_entry(&bp->b_li_list, struct xfs_log_item,
				       li_bio_list);

		/*
		 * Remove the item from the list, so we don't have any
		 * confusion if the item is added to another buf.
		 * Don't touch the log item after calling its
		 * callback, because it could have freed itself.
		 */
		list_del_init(&lip->li_bio_list);
		lip->li_cb(bp, lip);
	}
}
/*
 * Invoke the error state callback for each log item affected by the failed I/O.
 *
 * If a metadata buffer write fails with a non-permanent error, the buffer is
 * eventually resubmitted and so the completion callbacks are not run. The error
 * state may need to be propagated to the log items attached to the buffer,
 * however, so the next AIL push of the item knows how to handle it correctly.
 */
STATIC void
xfs_buf_do_callbacks_fail(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip;
	struct xfs_ail		*ailp;

	/*
	 * Buffer log item errors are handled directly by xfs_buf_item_push()
	 * and xfs_buf_iodone_callback_error, and they have no IO error
	 * callbacks. Check only for items in b_li_list.
	 */
	if (list_empty(&bp->b_li_list))
		return;

	lip = list_first_entry(&bp->b_li_list, struct xfs_log_item,
			       li_bio_list);
	ailp = lip->li_ailp;
	spin_lock(&ailp->ail_lock);
	list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
		if (lip->li_ops->iop_error)
			lip->li_ops->iop_error(lip, bp);
	}
	spin_unlock(&ailp->ail_lock);
}
static bool
xfs_buf_iodone_callback_error(
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	struct xfs_log_item	*lip;
	struct xfs_mount	*mp;
	static ulong		lasttime;
	static struct xfs_buftarg *lasttarg;
	struct xfs_error_cfg	*cfg;

	/*
	 * The failed buffer might not have a buf_log_item attached or the
	 * log_item list might be empty. Get the mp from the available
	 * xfs_log_item.
	 */
	lip = list_first_entry_or_null(&bp->b_li_list, struct xfs_log_item,
				       li_bio_list);
	mp = lip ? lip->li_mountp : bip->bli_item.li_mountp;

	/*
	 * If we've already decided to shutdown the filesystem because of
	 * I/O errors, there's no point in giving this a retry.
	 */
	if (XFS_FORCED_SHUTDOWN(mp))
		goto out_stale;

	if (bp->b_target != lasttarg ||
	    time_after(jiffies, (lasttime + 5*HZ))) {
		lasttime = jiffies;
		xfs_buf_ioerror_alert(bp, __this_address);
	}
	lasttarg = bp->b_target;

	/* synchronous writes will have callers process the error */
	if (!(bp->b_flags & XBF_ASYNC))
		goto out_stale;

	trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
	ASSERT(bp->b_iodone != NULL);

	cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);

	/*
	 * If the write was asynchronous then no one will be looking for the
	 * error.  If this is the first failure of this type, clear the error
	 * state and write the buffer out again. This means we always retry an
	 * async write failure at least once, but we also need to set the buffer
	 * up to behave correctly now for repeated failures.
	 */
	if (!(bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL)) ||
	     bp->b_last_error != bp->b_error) {
		bp->b_flags |= (XBF_WRITE | XBF_DONE | XBF_WRITE_FAIL);
		bp->b_last_error = bp->b_error;
		if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
		    !bp->b_first_retry_time)
			bp->b_first_retry_time = jiffies;

		xfs_buf_ioerror(bp, 0);
		xfs_buf_submit(bp);
		return true;
	}

	/*
	 * Repeated failure on an async write. Take action according to the
	 * error configuration we have been set up to use.
	 */

	if (cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
	    ++bp->b_retries > cfg->max_retries)
		goto permanent_error;
	if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
	    time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time))
		goto permanent_error;

	/* At unmount we may treat errors differently */
	if ((mp->m_flags & XFS_MOUNT_UNMOUNTING) && mp->m_fail_unmount)
		goto permanent_error;

	/*
	 * Still a transient error, run IO completion failure callbacks and let
	 * the higher layers retry the buffer.
	 */
	xfs_buf_do_callbacks_fail(bp);
	xfs_buf_ioerror(bp, 0);
	xfs_buf_relse(bp);
	return true;

	/*
	 * Permanent error - we need to trigger a shutdown if we haven't already
	 * to indicate that inconsistency will result from this action.
	 */
permanent_error:
	xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
out_stale:
	xfs_buf_stale(bp);
	bp->b_flags |= XBF_DONE;
	trace_xfs_buf_error_relse(bp, _RET_IP_);
	return false;
}
/*
 * This is the iodone() function for buffers which have had callbacks attached
 * to them by xfs_buf_attach_iodone(). We need to iterate the items on the
 * callback list, mark the buffer as having no more callbacks and then push the
 * buffer through IO completion processing.
 */
void
xfs_buf_iodone_callbacks(
	struct xfs_buf		*bp)
{
	/*
	 * If there is an error, process it. Some errors require us
	 * to run callbacks after failure processing is done so we
	 * detect that and take appropriate action.
	 */
	if (bp->b_error && xfs_buf_iodone_callback_error(bp))
		return;

	/*
	 * Successful IO or permanent error. Either way, we can clear the
	 * retry state here in preparation for the next error that may occur.
	 */
	bp->b_last_error = 0;
	bp->b_retries = 0;
	bp->b_first_retry_time = 0;

	xfs_buf_do_callbacks(bp);
	bp->b_log_item = NULL;
	list_del_init(&bp->b_li_list);
	bp->b_iodone = NULL;
	xfs_buf_ioend(bp);
}
/*
 * This is the iodone() function for buffers which have been
 * logged.  It is called when they are eventually flushed out.
 * It should remove the buf item from the AIL, and free the buf item.
 * It is called by xfs_buf_iodone_callbacks() above which will take
 * care of cleaning up the buffer itself.
 */
void
xfs_buf_iodone(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail		*ailp = lip->li_ailp;

	ASSERT(BUF_ITEM(lip)->bli_buf == bp);

	xfs_buf_rele(bp);

	/*
	 * If we are forcibly shutting down, this may well be
	 * off the AIL already. That's because we simulate the
	 * log-committed callbacks to unpin these buffers. Or we may never
	 * have put this item on AIL because the transaction was
	 * aborted forcibly. xfs_trans_ail_delete() takes care of these.
	 *
	 * Either way, AIL is useless if we're forcing a shutdown.
	 */
	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
	xfs_buf_item_free(BUF_ITEM(lip));
}