// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include <linux/writeback.h>
/*
 * structure owned by writepages passed to individual writepage calls
 */
struct xfs_writepage_ctx {
	struct xfs_bmbt_irec	imap;
	unsigned int		io_type;
	unsigned int		cow_seq;
	struct xfs_ioend	*ioend;
};
struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}
struct dax_device *
xfs_find_daxdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_daxdev;
	else
		return mp->m_ddev_targp->bt_daxdev;
}
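
/*
 * Finish writeback for a single page once all block-sized I/Os on it have
 * completed.  With block size smaller than the page size, the iomap_page
 * structure counts in-flight writeback I/Os per page: for example, 1k
 * blocks on a 4k page can have up to four writebacks pending, and
 * end_page_writeback() must only run when the last of them completes.
 */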
static void
xfs_finish_page_writeback(
	struct inode		*inode,
	struct bio_vec		*bvec,
	int			error)
{
	struct iomap_page	*iop = to_iomap_page(bvec->bv_page);

	if (error) {
		SetPageError(bvec->bv_page);
		mapping_set_error(inode->i_mapping, -EIO);
	}

	ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
	ASSERT(!iop || atomic_read(&iop->write_count) > 0);

	if (!iop || atomic_dec_and_test(&iop->write_count))
		end_page_writeback(bvec->bv_page);
}
/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct inode		*inode = ioend->io_inode;
	struct bio		*bio = &ioend->io_inline_bio;
	struct bio		*last = ioend->io_bio, *next;
	u64			start = bio->bi_iter.bi_sector;
	bool			quiet = bio_flagged(bio, BIO_QUIET);

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct bio_vec	*bvec;
		int		i;
		struct bvec_iter_all iter_all;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk each page on bio, ending page IO on them */
		bio_for_each_segment_all(bvec, bio, i, iter_all)
			xfs_finish_page_writeback(inode, bvec, error);
		bio_put(bio);
	}

	if (unlikely(error && !quiet)) {
		xfs_err_ratelimited(XFS_I(inode)->i_mount,
			"writeback error on sector %llu", start);
	}
}
/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}
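
/*
 * Reserve a transaction up front for the on-disk inode size update that may
 * be needed at I/O completion.  The freeze protection and NOFS state that
 * the transaction carries are handed off to the completion thread, which
 * takes them back up in xfs_setfilesize_ioend().
 */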
STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0,
				XFS_TRANS_NOFS, &tp);
	if (error)
		return error;

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	return 0;
}
/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
__xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}
int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	return __xfs_setfilesize(ip, tp, offset, size);
}
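
/*
 * Finish an on-disk size update using the transaction that was reserved at
 * submission time, first re-acquiring the freeze protection and NOFS state
 * that the submitter handed over with it.
 */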
STATIC int
xfs_setfilesize_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}
/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	struct xfs_ioend	*ioend =
		container_of(work, struct xfs_ioend, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	int			error;

	/*
	 * Just clean up the in-memory structures if the fs has been shut down.
	 */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		error = -EIO;
		goto done;
	}

	/*
	 * Clean up any COW blocks on an I/O error.
	 */
	error = blk_status_to_errno(ioend->io_bio->bi_status);
	if (unlikely(error)) {
		switch (ioend->io_type) {
		case XFS_IO_COW:
			xfs_reflink_cancel_cow_range(ip, offset, size, true);
			break;
		}

		goto done;
	}

	/*
	 * Success: commit the COW or unwritten blocks if needed.
	 */
	switch (ioend->io_type) {
	case XFS_IO_COW:
		error = xfs_reflink_end_cow(ip, offset, size);
		break;
	case XFS_IO_UNWRITTEN:
		/* writeback should never update isize */
		error = xfs_iomap_write_unwritten(ip, offset, size, false);
		break;
	default:
		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
		break;
	}

done:
	if (ioend->io_append_trans)
		error = xfs_setfilesize_ioend(ioend, error);
	xfs_destroy_ioend(ioend, error);
}
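
/*
 * Bio completion handler.  Ioends that still need transactional work
 * (unwritten extent conversion, COW remapping, or a deferred on-disk size
 * update) are punted to a workqueue, as that work cannot be done in bio
 * completion context; everything else is torn down directly.
 */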
STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct xfs_ioend	*ioend = bio->bi_private;
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

	if (ioend->io_type == XFS_IO_UNWRITTEN || ioend->io_type == XFS_IO_COW)
		queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
	else if (ioend->io_append_trans)
		queue_work(mp->m_data_workqueue, &ioend->io_work);
	else
		xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
}
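
/*
 * Find the extent backing @offset and classify it into wpc->io_type
 * (overwrite, unwritten, delalloc, COW or hole), caching the mapping in
 * wpc->imap so that subsequent blocks covered by the same extent can skip
 * the lookup entirely.
 */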
STATIC int
xfs_map_blocks(
	struct xfs_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = i_blocksize(inode);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset), end_fsb;
	xfs_fileoff_t		cow_fsb = NULLFILEOFF;
	struct xfs_bmbt_irec	imap;
	int			whichfork = XFS_DATA_FORK;
	struct xfs_iext_cursor	icur;
	bool			imap_valid;
	int			error = 0;

	/*
	 * We have to make sure the cached mapping is within EOF to protect
	 * against eofblocks trimming on file release leaving us with a stale
	 * mapping. Otherwise, a page for a subsequent file extending buffered
	 * write could get picked up by this writeback cycle and written to the
	 * wrong blocks.
	 *
	 * Note that what we really want here is a generic mapping invalidation
	 * mechanism to protect us from arbitrary extent modifying contexts, not
	 * just eofblocks.
	 */
	xfs_trim_extent_eof(&wpc->imap, ip);

	/*
	 * COW fork blocks can overlap data fork blocks even if the blocks
	 * aren't shared.  COW I/O always takes precedence, so we must always
	 * check for overlap on reflink inodes unless the mapping is already a
	 * COW one, or the COW fork hasn't changed from the last time we looked
	 * at it.
	 *
	 * It's safe to check the COW fork if_seq here without the ILOCK because
	 * we've indirectly protected against concurrent updates: writeback has
	 * the page locked, which prevents concurrent invalidations by reflink
	 * and directio and prevents concurrent buffered writes to the same
	 * page.  Changes to if_seq always happen under i_lock, which protects
	 * against concurrent updates and provides a memory barrier on the way
	 * out that ensures that we always see the current value.
	 */
	imap_valid = offset_fsb >= wpc->imap.br_startoff &&
		     offset_fsb < wpc->imap.br_startoff + wpc->imap.br_blockcount;
	if (imap_valid &&
	    (!xfs_inode_has_cow_data(ip) ||
	     wpc->io_type == XFS_IO_COW ||
	     wpc->cow_seq == READ_ONCE(ip->i_cowfp->if_seq)))
		return 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * If we don't have a valid map, now it's time to get a new one for this
	 * offset.  This will convert delayed allocations (including COW ones)
	 * into real extents.  If we return without a valid map, it means we
	 * landed in a hole and we skip the block.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_super->s_maxbytes);

	if (offset > mp->m_super->s_maxbytes - count)
		count = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);

	/*
	 * Check if this offset is covered by a COW extent, and if so use
	 * it directly instead of looking up anything in the data fork.
	 */
	if (xfs_inode_has_cow_data(ip) &&
	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
		cow_fsb = imap.br_startoff;
	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
		wpc->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		/*
		 * Truncate can race with writeback since writeback doesn't
		 * take the iolock and truncate decreases the file size before
		 * it starts truncating the pages between new_size and old_size.
		 * Therefore, we can end up in the situation where writeback
		 * gets a CoW fork mapping but the truncate makes the mapping
		 * invalid and we end up in here trying to get a new mapping.
		 * Bail out here so that we simply never get a valid mapping
		 * and so we drop the write altogether.  The page truncation
		 * will kill the contents anyway.
		 */
		if (offset > i_size_read(inode)) {
			wpc->io_type = XFS_IO_HOLE;
			return 0;
		}
		whichfork = XFS_COW_FORK;
		wpc->io_type = XFS_IO_COW;
		goto allocate_blocks;
	}

	/*
	 * Map valid and no COW extent in the way?  We're done.
	 */
	if (imap_valid) {
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return 0;
	}

	/*
	 * If we don't have a valid map, now it's time to get a new one for this
	 * offset.  This will convert delayed allocations (including COW ones)
	 * into real extents.
	 */
	if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
		imap.br_startoff = end_fsb;	/* fake a hole past EOF */
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (imap.br_startoff > offset_fsb) {
		/* landed in a hole or beyond EOF */
		imap.br_blockcount = imap.br_startoff - offset_fsb;
		imap.br_startoff = offset_fsb;
		imap.br_startblock = HOLESTARTBLOCK;
		wpc->io_type = XFS_IO_HOLE;
	} else {
		/*
		 * Truncate to the next COW extent if there is one.  This is the
		 * only opportunity to do this because we can skip COW fork
		 * lookups for the subsequent blocks in the mapping; however,
		 * the requirement to treat the COW range separately remains.
		 */
		if (cow_fsb != NULLFILEOFF &&
		    cow_fsb < imap.br_startoff + imap.br_blockcount)
			imap.br_blockcount = cow_fsb - imap.br_startoff;

		if (isnullstartblock(imap.br_startblock)) {
			/* got a delalloc extent */
			wpc->io_type = XFS_IO_DELALLOC;
			goto allocate_blocks;
		}

		if (imap.br_state == XFS_EXT_UNWRITTEN)
			wpc->io_type = XFS_IO_UNWRITTEN;
		else
			wpc->io_type = XFS_IO_OVERWRITE;
	}

	wpc->imap = imap;
	xfs_trim_extent_eof(&wpc->imap, ip);
	trace_xfs_map_blocks_found(ip, offset, count, wpc->io_type, &imap);
	return 0;
allocate_blocks:
	error = xfs_iomap_write_allocate(ip, whichfork, offset, &imap,
			&wpc->cow_seq);
	if (error)
		return error;
	ASSERT(whichfork == XFS_COW_FORK || cow_fsb == NULLFILEOFF ||
	       imap.br_startoff + imap.br_blockcount <= cow_fsb);
	wpc->imap = imap;
	xfs_trim_extent_eof(&wpc->imap, ip);
	trace_xfs_map_blocks_alloc(ip, offset, count, wpc->io_type, &imap);
	return 0;
}
/*
 * Submit the bio for an ioend. We are passed an ioend with a bio attached to
 * it, and we submit that bio. The ioend may be used for multiple bio
 * submissions, so we only want to allocate an append transaction for the ioend
 * once. In the case of multiple bio submission, each bio will take an IO
 * reference to the ioend to ensure that the ioend completion is only done once
 * all bios have been submitted and the ioend is really done.
 *
 * If @fail is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them. In this situation, we need to fail the bio and ioend
 * rather than submit it to IO. This typically only happens on a filesystem
 * shutdown.
 */
STATIC int
xfs_submit_ioend(
	struct writeback_control *wbc,
	struct xfs_ioend	*ioend,
	int			status)
{
	/* Convert CoW extents to regular */
	if (!status && ioend->io_type == XFS_IO_COW) {
		/*
		 * Yuk. This can do memory allocation, but is not a
		 * transactional operation so everything is done in GFP_KERNEL
		 * context. That can deadlock, because we hold pages in
		 * writeback state and GFP_KERNEL allocations can block on them.
		 * Hence we must operate in nofs conditions here.
		 */
		unsigned nofs_flag;

		nofs_flag = memalloc_nofs_save();
		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
				ioend->io_offset, ioend->io_size);
		memalloc_nofs_restore(nofs_flag);
	}

	/* Reserve log space if we might write beyond the on-disk inode size. */
	if (!status &&
	    ioend->io_type != XFS_IO_UNWRITTEN &&
	    xfs_ioend_is_append(ioend) &&
	    !ioend->io_append_trans)
		status = xfs_setfilesize_trans_alloc(ioend);

	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = xfs_end_bio;
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);

	/*
	 * If we are failing the IO now, just mark the ioend with an
	 * error and finish it. This will run IO completion immediately
	 * as there is only one reference to the ioend at this point in
	 * time.
	 */
	if (status) {
		ioend->io_bio->bi_status = errno_to_blk_status(status);
		bio_endio(ioend->io_bio);
		return status;
	}

	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
	submit_bio(ioend->io_bio);
	return 0;
}
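
/*
 * Allocate an ioend together with its initial bio.  The bio comes from
 * xfs_ioend_bioset, which reserves enough front padding that the ioend can
 * be recovered from the embedded io_inline_bio with container_of(), avoiding
 * a separate memory allocation.
 */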
static struct xfs_ioend *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type,
	xfs_off_t		offset,
	struct block_device	*bdev,
	sector_t		sector)
{
	struct xfs_ioend	*ioend;
	struct bio		*bio;

	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &xfs_ioend_bioset);
	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;

	ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_offset = offset;
	INIT_WORK(&ioend->io_work, xfs_end_io);
	ioend->io_append_trans = NULL;
	ioend->io_bio = bio;
	return ioend;
}
/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in xfs_destroy_ioend().
 */
static void
xfs_chain_bio(
	struct xfs_ioend	*ioend,
	struct writeback_control *wbc,
	struct block_device	*bdev,
	sector_t		sector)
{
	struct bio *new;

	new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
	bio_set_dev(new, bdev);
	new->bi_iter.bi_sector = sector;
	bio_chain(ioend->io_bio, new);
	bio_get(ioend->io_bio);		/* for xfs_destroy_ioend */
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
	submit_bio(ioend->io_bio);
	ioend->io_bio = new;
}
/*
 * Test to see if we have an existing ioend structure that we could append to
 * first; otherwise finish off the current ioend and start another.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	xfs_off_t		offset,
	struct page		*page,
	struct iomap_page	*iop,
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct list_head	*iolist)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
	unsigned		len = i_blocksize(inode);
	unsigned		poff = offset & (PAGE_SIZE - 1);
	sector_t		sector;

	sector = xfs_fsb_to_db(ip, wpc->imap.br_startblock) +
		((offset - XFS_FSB_TO_B(mp, wpc->imap.br_startoff)) >> 9);

	if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
	    sector != bio_end_sector(wpc->ioend->io_bio) ||
	    offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset,
				bdev, sector);
	}

	if (!__bio_try_merge_page(wpc->ioend->io_bio, page, len, poff, true)) {
		if (iop)
			atomic_inc(&iop->write_count);
		if (bio_full(wpc->ioend->io_bio))
			xfs_chain_bio(wpc->ioend, wbc, bdev, sector);
		bio_add_page(wpc->ioend->io_bio, page, len, poff);
	}

	wpc->ioend->io_size += len;
}
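
/* Trace the invalidation, then let iomap do the actual page work. */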
STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned int		offset,
	unsigned int		length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset, length);
	iomap_invalidatepage(page, offset, length);
}
/*
 * If the page has delalloc blocks on it, we need to punch them out before we
 * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
 * inode that can trip up a later direct I/O read operation on the same region.
 *
 * We prevent this by truncating away the delalloc regions on the page.  Because
 * they are delalloc, we can do this without needing a transaction. Indeed - if
 * we get ENOSPC errors, we have to be able to do this truncation without a
 * transaction as there is no space left for block reservation (typically why we
 * see an ENOSPC in writeback).
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			offset = page_offset(page);
	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, offset);
	int			error;

	if (XFS_FORCED_SHUTDOWN(mp))
		goto out_invalidate;

	xfs_alert(mp,
		"page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
			PAGE_SIZE / i_blocksize(inode));
	if (error && !XFS_FORCED_SHUTDOWN(mp))
		xfs_alert(mp, "page discard unable to remove delalloc mapping.");
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
}
/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * forward progress guarantees we need to provide. The current ioend we are
 * adding blocks to is cached on the writepage context, and if the new block
 * does not append to the cached ioend it will create a new ioend and cache that
 * instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has been
 * detected.  While ioends are submitted immediately after they are completed,
 * batching optimisations are provided by higher level block plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on the
 * writepage context that the caller will need to submit.
 */
STATIC int
xfs_writepage_map(
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct inode		*inode,
	struct page		*page,
	uint64_t		end_offset)
{
	LIST_HEAD(submit_list);
	struct iomap_page	*iop = to_iomap_page(page);
	unsigned		len = i_blocksize(inode);
	struct xfs_ioend	*ioend, *next;
	uint64_t		file_offset;	/* file offset of page */
	int			error = 0, count = 0, i;

	ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
	ASSERT(!iop || atomic_read(&iop->write_count) == 0);

	/*
	 * Walk through the page to find areas to write back. If we run off the
	 * end of the current map or find the current map invalid, grab a new
	 * one.
	 */
	for (i = 0, file_offset = page_offset(page);
	     i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
	     i++, file_offset += len) {
		if (iop && !test_bit(i, iop->uptodate))
			continue;

		error = xfs_map_blocks(wpc, inode, file_offset);
		if (error)
			break;
		if (wpc->io_type == XFS_IO_HOLE)
			continue;
		xfs_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
				 &submit_list);
		count++;
	}

	ASSERT(wpc->ioend || list_empty(&submit_list));
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));

	/*
	 * On error, we have to fail the ioend here because we may have set
	 * pages under writeback, we have to make sure we run IO completion to
	 * mark the error state of the IO appropriately, so we can't cancel the
	 * ioend directly here.  That means we have to mark this page as under
	 * writeback if we included any blocks from it in the ioend chain so
	 * that completion treats it correctly.
	 *
	 * If we didn't include the page in the ioend, then on error we can
	 * simply discard and unlock it as there are no other users of the page
	 * now.  The caller will still need to trigger submission of outstanding
	 * ioends on the writepage context so they are treated correctly on
	 * error.
	 */
	if (unlikely(error)) {
		if (!count) {
			xfs_aops_discard_page(page);
			ClearPageUptodate(page);
			unlock_page(page);
			goto done;
		}

		/*
		 * If the page was not fully cleaned, we need to ensure that the
		 * higher layers come back to it correctly.  That means we need
		 * to keep the page dirty, and for WB_SYNC_ALL writeback we need
		 * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed
		 * so another attempt to write this page in this writeback sweep
		 * will be made.
		 */
		set_page_writeback_keepwrite(page);
	} else {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	}

	unlock_page(page);

	/*
	 * Preserve the original error if there was one, otherwise catch
	 * submission errors here and propagate into subsequent ioend
	 * submissions.
	 */
	list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
		int error2;

		list_del_init(&ioend->io_list);
		error2 = xfs_submit_ioend(wbc, ioend, error);
		if (error2 && !error)
			error = error2;
	}

	/*
	 * We can end up here with no error and nothing to write only if we race
	 * with a partial page truncate on a sub-page block sized filesystem.
	 */
	if (!count)
		end_page_writeback(page);
done:
	mapping_set_error(page->mapping, error);
	return error;
}
/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 */
STATIC int
xfs_do_writepage(
	struct page		*page,
	struct writeback_control *wbc,
	void			*data)
{
	struct xfs_writepage_ctx *wpc = data;
	struct inode		*inode = page->mapping->host;
	loff_t			offset;
	uint64_t		end_offset;
	pgoff_t			end_index;

	trace_xfs_writepage(inode, page, 0, 0);

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
		goto redirty;

	/*
	 * Is this page beyond the end of the file?
	 *
	 * The page index is less than the end_index, adjust the end_offset
	 * to the highest offset that this page should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_SHIFT;
	if (page->index < end_index)
		end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
	else {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |		file mapping		        | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
		 * ^--------------------------------^-----------|---------
		 * |				    |      Straddles     |
		 * ---------------------------------^-----------|--------|
		 */
		unsigned offset_into_page = offset & (PAGE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress.  We must redirty the
		 * page so that reclaim stops reclaiming it.  Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that end_index is an unsigned long.  It would overflow
		 * if the given offset is greater than 16TB on a 32-bit system,
		 * so if we checked whether the page is fully outside i_size
		 * via "if (page->index >= end_index + 1)", "end_index + 1"
		 * would evaluate to 0.  This page would then be redirtied
		 * and written out repeatedly, resulting in an infinite loop;
		 * the user program performing this operation would hang.
		 * Instead, we can detect this situation by checking if the
		 * page to write is totally beyond the i_size or if its
		 * offset is just equal to the EOF.
		 */
		if (page->index > end_index ||
		    (page->index == end_index && offset_into_page == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_SIZE);

		/* Adjust the end_offset to the end of file */
		end_offset = offset;
	}

	return xfs_writepage_map(wpc, wbc, inode, page, end_offset);

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}
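
/*
 * The ->writepage and ->writepages entry points below share the same pattern:
 * run the page walk with an on-stack writepage context, then submit whatever
 * ioend is still cached on that context before returning.
 */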
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_HOLE,
	};
	int			ret;

	ret = xfs_do_writepage(page, wbc, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_HOLE,
	};
	int			ret;

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}
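
/*
 * DAX inodes bypass the page cache, so "writeback" here only has to flush
 * dirty CPU cache lines for the mapped ranges to media, which
 * dax_writeback_mapping_range() takes care of.
 */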
STATIC int
xfs_dax_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return dax_writeback_mapping_range(mapping,
			xfs_find_bdev_for_inode(mapping->host), wbc);
}
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	trace_xfs_releasepage(page->mapping->host, page, 0, 0);
	return iomap_releasepage(page, gfp_mask);
}
STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct xfs_inode	*ip = XFS_I(mapping->host);

	trace_xfs_vm_bmap(ip);

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O.  We really can't allow
	 * that on reflink inodes, so we have to skip out here.  And yes,
	 * 0 is the magic code for a bmap error.
	 *
	 * Since we don't pass back blockdev info, we can't return bmap
	 * information for rt files either.
	 */
	if (xfs_is_reflink_inode(ip) || XFS_IS_REALTIME_INODE(ip))
		return 0;
	return iomap_bmap(mapping, block, &xfs_iomap_ops);
}
STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	trace_xfs_vm_readpage(page->mapping->host, 1);
	return iomap_readpage(page, &xfs_iomap_ops);
}
STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	trace_xfs_vm_readpages(mapping->host, nr_pages);
	return iomap_readpages(mapping, pages, nr_pages, &xfs_iomap_ops);
}
static int
xfs_iomap_swapfile_activate(
	struct swap_info_struct		*sis,
	struct file			*swap_file,
	sector_t			*span)
{
	sis->bdev = xfs_find_bdev_for_inode(file_inode(swap_file));
	return iomap_swapfile_activate(sis, swap_file, span, &xfs_iomap_ops);
}
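
/*
 * Note that xfs_dax_aops deliberately has no read or write page methods:
 * data I/O on DAX inodes never goes through the page cache, so only
 * writeback (cache flushing), no-op dirtying/invalidation, and swapfile
 * activation remain.
 */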
const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= iomap_set_page_dirty,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= noop_direct_IO,
	.migratepage		= iomap_migrate_page,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
	.swap_activate		= xfs_iomap_swapfile_activate,
};
const struct address_space_operations xfs_dax_aops = {
	.writepages		= xfs_dax_writepages,
	.direct_IO		= noop_direct_IO,
	.set_page_dirty		= noop_set_page_dirty,
	.invalidatepage		= noop_invalidatepage,
	.swap_activate		= xfs_iomap_swapfile_activate,
};