1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2010 Red Hat, Inc.
4 * Copyright (C) 2016-2019 Christoph Hellwig.
6 #include <linux/module.h>
7 #include <linux/compiler.h>
9 #include <linux/iomap.h>
10 #include <linux/pagemap.h>
11 #include <linux/uio.h>
12 #include <linux/buffer_head.h>
13 #include <linux/dax.h>
14 #include <linux/writeback.h>
15 #include <linux/list_sort.h>
16 #include <linux/swap.h>
17 #include <linux/bio.h>
18 #include <linux/sched/signal.h>
19 #include <linux/migrate.h>
22 #include "../internal.h"
25 * Structure allocated for each page or THP when block size < page size
26 * to track sub-page uptodate status and I/O completions.
29 atomic_t read_bytes_pending;
30 atomic_t write_bytes_pending;
31 spinlock_t uptodate_lock;
32 unsigned long uptodate[];
35 static inline struct iomap_page *to_iomap_page(struct page *page)
38 * per-block data is stored in the head page. Callers should
39 * not be dealing with tail pages (and if they are, they can
40 * call thp_head() first).
42 VM_BUG_ON_PGFLAGS(PageTail(page), page);
44 if (page_has_private(page))
45 return (struct iomap_page *)page_private(page);
49 static struct bio_set iomap_ioend_bioset;
51 static struct iomap_page *
52 iomap_page_create(struct inode *inode, struct page *page)
54 struct iomap_page *iop = to_iomap_page(page);
55 unsigned int nr_blocks = i_blocks_per_page(inode, page);
57 if (iop || nr_blocks <= 1)
60 iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
61 GFP_NOFS | __GFP_NOFAIL);
62 spin_lock_init(&iop->uptodate_lock);
63 attach_page_private(page, iop);
68 iomap_page_release(struct page *page)
70 struct iomap_page *iop = detach_page_private(page);
71 unsigned int nr_blocks = i_blocks_per_page(page->mapping->host, page);
75 WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
76 WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
77 WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
83 * Calculate the range inside the page that we actually need to read.
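*
* For example (illustrative numbers): with 4096-byte pages and 1024-byte
* blocks, a full-page read of a page whose first block is already uptodate
* is trimmed here to poff = 1024 and plen = 3072, so only the three stale
* blocks are actually read.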
86 iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
87 loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
89 loff_t orig_pos = *pos;
90 loff_t isize = i_size_read(inode);
91 unsigned block_bits = inode->i_blkbits;
92 unsigned block_size = (1 << block_bits);
93 unsigned poff = offset_in_page(*pos);
94 unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
95 unsigned first = poff >> block_bits;
96 unsigned last = (poff + plen - 1) >> block_bits;
99 * If the block size is smaller than the page size we need to check the
100 * per-block uptodate status and adjust the offset and length if needed
101 * to avoid reading in already uptodate ranges.
106 /* move forward for each leading block marked uptodate */
107 for (i = first; i <= last; i++) {
108 if (!test_bit(i, iop->uptodate))
116 /* truncate len if we find any trailing uptodate block(s) */
117 for ( ; i <= last; i++) {
118 if (test_bit(i, iop->uptodate)) {
119 plen -= (last - i + 1) * block_size;
127 * If the extent spans the block that contains the i_size we need to
128 * handle both halves separately so that we properly zero data in the
129 * page cache for blocks that are entirely outside of i_size.
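*
* For example (illustrative numbers): with 1024-byte blocks and i_size
* ending 1536 bytes into this page, the last byte of data lives in block 1,
* so any later blocks are trimmed from plen here; a subsequent pass sees
* them as post-EOF and zeroes them instead of reading them from disk.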
131 if (orig_pos <= isize && orig_pos + length > isize) {
132 unsigned end = offset_in_page(isize - 1) >> block_bits;
134 if (first <= end && last > end)
135 plen -= (last - end) * block_size;
143 iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
145 struct iomap_page *iop = to_iomap_page(page);
146 struct inode *inode = page->mapping->host;
147 unsigned first = off >> inode->i_blkbits;
148 unsigned last = (off + len - 1) >> inode->i_blkbits;
151 spin_lock_irqsave(&iop->uptodate_lock, flags);
152 bitmap_set(iop->uptodate, first, last - first + 1);
153 if (bitmap_full(iop->uptodate, i_blocks_per_page(inode, page)))
154 SetPageUptodate(page);
155 spin_unlock_irqrestore(&iop->uptodate_lock, flags);
159 iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
164 if (page_has_private(page))
165 iomap_iop_set_range_uptodate(page, off, len);
167 SetPageUptodate(page);
171 iomap_read_page_end_io(struct bio_vec *bvec, int error)
173 struct page *page = bvec->bv_page;
174 struct iomap_page *iop = to_iomap_page(page);
176 if (unlikely(error)) {
177 ClearPageUptodate(page);
180 iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
183 if (!iop || atomic_sub_and_test(bvec->bv_len, &iop->read_bytes_pending))
188 iomap_read_end_io(struct bio *bio)
190 int error = blk_status_to_errno(bio->bi_status);
191 struct bio_vec *bvec;
192 struct bvec_iter_all iter_all;
194 bio_for_each_segment_all(bvec, bio, iter_all)
195 iomap_read_page_end_io(bvec, error);
199 struct iomap_readpage_ctx {
200 struct page *cur_page;
201 bool cur_page_in_bio;
203 struct readahead_control *rac;
207 iomap_read_inline_data(struct inode *inode, struct page *page,
210 size_t size = i_size_read(inode);
213 if (PageUptodate(page))
217 BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));
219 addr = kmap_atomic(page);
220 memcpy(addr, iomap->inline_data, size);
221 memset(addr + size, 0, PAGE_SIZE - size);
223 SetPageUptodate(page);
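/*
 * A block is zero-filled in the page cache rather than read from disk when
 * the mapping does not reference written data on disk (iomap->type !=
 * IOMAP_MAPPED, e.g. a hole or an unwritten extent), when the extent was
 * freshly allocated and so holds no valid data yet (IOMAP_F_NEW), or when
 * the position is at or beyond i_size.
 */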
226 static inline bool iomap_block_needs_zeroing(struct inode *inode,
227 struct iomap *iomap, loff_t pos)
229 return iomap->type != IOMAP_MAPPED ||
230 (iomap->flags & IOMAP_F_NEW) ||
231 pos >= i_size_read(inode);
235 iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
236 struct iomap *iomap, struct iomap *srcmap)
238 struct iomap_readpage_ctx *ctx = data;
239 struct page *page = ctx->cur_page;
240 struct iomap_page *iop = iomap_page_create(inode, page);
241 bool same_page = false, is_contig = false;
242 loff_t orig_pos = pos;
246 if (iomap->type == IOMAP_INLINE) {
248 iomap_read_inline_data(inode, page, iomap);
252 /* zero post-eof blocks as the page may be mapped */
253 iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
257 if (iomap_block_needs_zeroing(inode, iomap, pos)) {
258 zero_user(page, poff, plen);
259 iomap_set_range_uptodate(page, poff, plen);
263 ctx->cur_page_in_bio = true;
265 atomic_add(plen, &iop->read_bytes_pending);
267 /* Try to merge into a previous segment if we can */
268 sector = iomap_sector(iomap, pos);
269 if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
270 if (__bio_try_merge_page(ctx->bio, page, plen, poff,
276 if (!is_contig || bio_full(ctx->bio, plen)) {
277 gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
278 gfp_t orig_gfp = gfp;
279 int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
282 submit_bio(ctx->bio);
284 if (ctx->rac) /* same as readahead_gfp_mask */
285 gfp |= __GFP_NORETRY | __GFP_NOWARN;
286 ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
288 * If the bio_alloc fails, try it again for a single page to
289 * avoid having to deal with partial page reads. This emulates
290 * what do_mpage_readpage does.
293 ctx->bio = bio_alloc(orig_gfp, 1);
294 ctx->bio->bi_opf = REQ_OP_READ;
296 ctx->bio->bi_opf |= REQ_RAHEAD;
297 ctx->bio->bi_iter.bi_sector = sector;
298 bio_set_dev(ctx->bio, iomap->bdev);
299 ctx->bio->bi_end_io = iomap_read_end_io;
302 bio_add_page(ctx->bio, page, plen, poff);
305 * Move the caller beyond our range so that it keeps making progress.
306 * For that we have to include any leading non-uptodate ranges, but
307 * we can skip trailing ones as they will be handled in the next iteration.
310 return pos - orig_pos + plen;
314 iomap_readpage(struct page *page, const struct iomap_ops *ops)
316 struct iomap_readpage_ctx ctx = { .cur_page = page };
317 struct inode *inode = page->mapping->host;
321 trace_iomap_readpage(page->mapping->host, 1);
323 for (poff = 0; poff < PAGE_SIZE; poff += ret) {
324 ret = iomap_apply(inode, page_offset(page) + poff,
325 PAGE_SIZE - poff, 0, ops, &ctx,
326 iomap_readpage_actor);
328 WARN_ON_ONCE(ret == 0);
336 WARN_ON_ONCE(!ctx.cur_page_in_bio);
338 WARN_ON_ONCE(ctx.cur_page_in_bio);
343 * Just like mpage_readahead and block_read_full_page we always
344 * return 0 and just mark the page as PageError on errors. This
345 * should be cleaned up all through the stack eventually.
349 EXPORT_SYMBOL_GPL(iomap_readpage);
352 iomap_readahead_actor(struct inode *inode, loff_t pos, loff_t length,
353 void *data, struct iomap *iomap, struct iomap *srcmap)
355 struct iomap_readpage_ctx *ctx = data;
358 for (done = 0; done < length; done += ret) {
359 if (ctx->cur_page && offset_in_page(pos + done) == 0) {
360 if (!ctx->cur_page_in_bio)
361 unlock_page(ctx->cur_page);
362 put_page(ctx->cur_page);
363 ctx->cur_page = NULL;
365 if (!ctx->cur_page) {
366 ctx->cur_page = readahead_page(ctx->rac);
367 ctx->cur_page_in_bio = false;
369 ret = iomap_readpage_actor(inode, pos + done, length - done,
377 * iomap_readahead - Attempt to read pages from a file.
378 * @rac: Describes the pages to be read.
379 * @ops: The operations vector for the filesystem.
381 * This function is for filesystems to call to implement their readahead
382 * address_space operation.
384 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
385 * blocks from disc), and may wait for it. The caller may be trying to
386 * access a different page, and so sleeping excessively should be avoided.
387 * It may allocate memory, but should avoid costly allocations. This
388 * function is called with memalloc_nofs set, so allocations will not cause
389 * the filesystem to be reentered.
391 void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
393 struct inode *inode = rac->mapping->host;
394 loff_t pos = readahead_pos(rac);
395 loff_t length = readahead_length(rac);
396 struct iomap_readpage_ctx ctx = {
400 trace_iomap_readahead(inode, readahead_count(rac));
403 loff_t ret = iomap_apply(inode, pos, length, 0, ops,
404 &ctx, iomap_readahead_actor);
406 WARN_ON_ONCE(ret == 0);
416 if (!ctx.cur_page_in_bio)
417 unlock_page(ctx.cur_page);
418 put_page(ctx.cur_page);
421 EXPORT_SYMBOL_GPL(iomap_readahead);
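/*
 * Usage sketch (hypothetical, not part of this file): filesystems normally
 * wire both read paths straight into their address_space_operations, with
 * example_iomap_ops standing in for the filesystem's read-side iomap_ops:
 *
 *	static int example_readpage(struct file *unused, struct page *page)
 *	{
 *		return iomap_readpage(page, &example_iomap_ops);
 *	}
 *
 *	static void example_readahead(struct readahead_control *rac)
 *	{
 *		iomap_readahead(rac, &example_iomap_ops);
 *	}
 */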
424 * iomap_is_partially_uptodate checks whether blocks within a page are uptodate.
427 * Returns true if all blocks which correspond to a file portion
428 * we want to read within the page are uptodate.
431 iomap_is_partially_uptodate(struct page *page, unsigned long from,
434 struct iomap_page *iop = to_iomap_page(page);
435 struct inode *inode = page->mapping->host;
436 unsigned len, first, last;
439 /* Limit range to one page */
440 len = min_t(unsigned, PAGE_SIZE - from, count);
442 /* First and last blocks in range within page */
443 first = from >> inode->i_blkbits;
444 last = (from + len - 1) >> inode->i_blkbits;
447 for (i = first; i <= last; i++)
448 if (!test_bit(i, iop->uptodate))
455 EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
458 iomap_releasepage(struct page *page, gfp_t gfp_mask)
460 trace_iomap_releasepage(page->mapping->host, page_offset(page),
464 * mm accommodates an old ext3 case where clean pages might not have had
465 * the dirty bit cleared. Thus, it can send actual dirty pages to
466 * ->releasepage() via shrink_active_list(), skip those here.
468 if (PageDirty(page) || PageWriteback(page))
470 iomap_page_release(page);
473 EXPORT_SYMBOL_GPL(iomap_releasepage);
476 iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
478 trace_iomap_invalidatepage(page->mapping->host, offset, len);
481 * If we are invalidating the entire page, clear the dirty state from it
482 * and release it to avoid unnecessary buildup of the LRU.
484 if (offset == 0 && len == PAGE_SIZE) {
485 WARN_ON_ONCE(PageWriteback(page));
486 cancel_dirty_page(page);
487 iomap_page_release(page);
490 EXPORT_SYMBOL_GPL(iomap_invalidatepage);
492 #ifdef CONFIG_MIGRATION
494 iomap_migrate_page(struct address_space *mapping, struct page *newpage,
495 struct page *page, enum migrate_mode mode)
499 ret = migrate_page_move_mapping(mapping, newpage, page, 0);
500 if (ret != MIGRATEPAGE_SUCCESS)
503 if (page_has_private(page))
504 attach_page_private(newpage, detach_page_private(page));
506 if (mode != MIGRATE_SYNC_NO_COPY)
507 migrate_page_copy(newpage, page);
509 migrate_page_states(newpage, page);
510 return MIGRATEPAGE_SUCCESS;
512 EXPORT_SYMBOL_GPL(iomap_migrate_page);
513 #endif /* CONFIG_MIGRATION */
516 IOMAP_WRITE_F_UNSHARE = (1 << 0),
520 iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
522 loff_t i_size = i_size_read(inode);
525 * Only truncate newly allocated pages beyond EOF, even if the
526 * write started inside the existing inode size.
528 if (pos + len > i_size)
529 truncate_pagecache_range(inode, max(pos, i_size), pos + len);
533 iomap_read_page_sync(loff_t block_start, struct page *page, unsigned poff,
534 unsigned plen, struct iomap *iomap)
539 bio_init(&bio, &bvec, 1);
540 bio.bi_opf = REQ_OP_READ;
541 bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
542 bio_set_dev(&bio, iomap->bdev);
543 __bio_add_page(&bio, page, plen, poff);
544 return submit_bio_wait(&bio);
548 __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
549 struct page *page, struct iomap *srcmap)
551 struct iomap_page *iop = iomap_page_create(inode, page);
552 loff_t block_size = i_blocksize(inode);
553 loff_t block_start = round_down(pos, block_size);
554 loff_t block_end = round_up(pos + len, block_size);
555 unsigned from = offset_in_page(pos), to = from + len, poff, plen;
557 if (PageUptodate(page))
559 ClearPageError(page);
562 iomap_adjust_read_range(inode, iop, &block_start,
563 block_end - block_start, &poff, &plen);
567 if (!(flags & IOMAP_WRITE_F_UNSHARE) &&
568 (from <= poff || from >= poff + plen) &&
569 (to <= poff || to >= poff + plen))
572 if (iomap_block_needs_zeroing(inode, srcmap, block_start)) {
573 if (WARN_ON_ONCE(flags & IOMAP_WRITE_F_UNSHARE))
575 zero_user_segments(page, poff, from, to, poff + plen);
577 int status = iomap_read_page_sync(block_start, page,
582 iomap_set_range_uptodate(page, poff, plen);
583 } while ((block_start += plen) < block_end);
589 iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
590 struct page **pagep, struct iomap *iomap, struct iomap *srcmap)
592 const struct iomap_page_ops *page_ops = iomap->page_ops;
596 BUG_ON(pos + len > iomap->offset + iomap->length);
598 BUG_ON(pos + len > srcmap->offset + srcmap->length);
600 if (fatal_signal_pending(current))
603 if (page_ops && page_ops->page_prepare) {
604 status = page_ops->page_prepare(inode, pos, len, iomap);
609 page = grab_cache_page_write_begin(inode->i_mapping, pos >> PAGE_SHIFT,
616 if (srcmap->type == IOMAP_INLINE)
617 iomap_read_inline_data(inode, page, srcmap);
618 else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
619 status = __block_write_begin_int(page, pos, len, NULL, srcmap);
621 status = __iomap_write_begin(inode, pos, len, flags, page,
624 if (unlikely(status))
633 iomap_write_failed(inode, pos, len);
636 if (page_ops && page_ops->page_done)
637 page_ops->page_done(inode, pos, 0, NULL, iomap);
642 iomap_set_page_dirty(struct page *page)
644 struct address_space *mapping = page_mapping(page);
647 if (unlikely(!mapping))
648 return !TestSetPageDirty(page);
651 * Lock out page->mem_cgroup migration to keep PageDirty
652 * synchronized with per-memcg dirty page counters.
654 lock_page_memcg(page);
655 newly_dirty = !TestSetPageDirty(page);
657 __set_page_dirty(page, mapping, 0);
658 unlock_page_memcg(page);
661 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
664 EXPORT_SYMBOL_GPL(iomap_set_page_dirty);
666 static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
667 size_t copied, struct page *page)
669 flush_dcache_page(page);
672 * The blocks that were entirely written will now be uptodate, so we
673 * don't have to worry about a readpage reading them and overwriting a
674 * partial write. However if we have encountered a short write and only
675 * partially written into a block, it will not be marked uptodate, so a
676 * readpage might come in and destroy our partial write.
678 * Do the simplest thing, and just treat any short write to a
679 * non-uptodate page as a zero-length write, and force the caller to redo
682 if (unlikely(copied < len && !PageUptodate(page)))
684 iomap_set_range_uptodate(page, offset_in_page(pos), len);
685 iomap_set_page_dirty(page);
689 static size_t iomap_write_end_inline(struct inode *inode, struct page *page,
690 struct iomap *iomap, loff_t pos, size_t copied)
694 WARN_ON_ONCE(!PageUptodate(page));
695 BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));
697 flush_dcache_page(page);
698 addr = kmap_atomic(page);
699 memcpy(iomap->inline_data + pos, addr + pos, copied);
702 mark_inode_dirty(inode);
706 /* Returns the number of bytes copied. May be 0. Cannot be an errno. */
707 static size_t iomap_write_end(struct inode *inode, loff_t pos, size_t len,
708 size_t copied, struct page *page, struct iomap *iomap,
709 struct iomap *srcmap)
711 const struct iomap_page_ops *page_ops = iomap->page_ops;
712 loff_t old_size = inode->i_size;
715 if (srcmap->type == IOMAP_INLINE) {
716 ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
717 } else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
718 ret = block_write_end(NULL, inode->i_mapping, pos, len, copied,
721 ret = __iomap_write_end(inode, pos, len, copied, page);
725 * Update the in-memory inode size after copying the data into the page
726 * cache. It's up to the file system to write the updated size to disk,
727 * preferably after I/O completion so that no stale data is exposed.
729 if (pos + ret > old_size) {
730 i_size_write(inode, pos + ret);
731 iomap->flags |= IOMAP_F_SIZE_CHANGED;
736 pagecache_isize_extended(inode, old_size, pos);
737 if (page_ops && page_ops->page_done)
738 page_ops->page_done(inode, pos, ret, page, iomap);
742 iomap_write_failed(inode, pos, len);
747 iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
748 struct iomap *iomap, struct iomap *srcmap)
750 struct iov_iter *i = data;
756 unsigned long offset; /* Offset into pagecache page */
757 unsigned long bytes; /* Bytes to write to page */
758 size_t copied; /* Bytes copied from user */
760 offset = offset_in_page(pos);
761 bytes = min_t(unsigned long, PAGE_SIZE - offset,
768 * Bring in the user page that we will copy from _first_.
769 * Otherwise there's a nasty deadlock on copying from the
770 * same page as we're writing to, without it being marked up-to-date.
773 * Not only is this an optimisation, but it is also required
774 * to check that the address is actually valid, when atomic
775 * usercopies are used, below.
777 if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
782 status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap,
784 if (unlikely(status))
787 if (mapping_writably_mapped(inode->i_mapping))
788 flush_dcache_page(page);
790 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
792 copied = iomap_write_end(inode, pos, bytes, copied, page, iomap,
797 iov_iter_advance(i, copied);
798 if (unlikely(copied == 0)) {
800 * If we were unable to copy any data at all, we must
801 * fall back to a single segment length write.
803 * If we didn't fallback here, we could livelock
804 * because not all segments in the iov can be copied at
805 * once without a pagefault.
807 bytes = min_t(unsigned long, PAGE_SIZE - offset,
808 iov_iter_single_seg_count(i));
815 balance_dirty_pages_ratelimited(inode->i_mapping);
816 } while (iov_iter_count(i) && length);
818 return written ? written : status;
822 iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
823 const struct iomap_ops *ops)
825 struct inode *inode = iocb->ki_filp->f_mapping->host;
826 loff_t pos = iocb->ki_pos, ret = 0, written = 0;
828 while (iov_iter_count(iter)) {
829 ret = iomap_apply(inode, pos, iov_iter_count(iter),
830 IOMAP_WRITE, ops, iter, iomap_write_actor);
837 return written ? written : ret;
839 EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
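/*
 * Usage sketch (hypothetical): a filesystem's ->write_iter typically does
 * roughly the following around this helper, with example_iomap_ops standing
 * in for its buffered-write iomap_ops; locking, write checks and advancing
 * iocb->ki_pos on success are the caller's responsibility:
 *
 *	inode_lock(inode);
 *	ret = generic_write_checks(iocb, from);
 *	if (ret > 0)
 *		ret = iomap_file_buffered_write(iocb, from, &example_iomap_ops);
 *	if (ret > 0)
 *		iocb->ki_pos += ret;
 *	inode_unlock(inode);
 */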
842 iomap_unshare_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
843 struct iomap *iomap, struct iomap *srcmap)
848 /* don't bother with blocks that are not shared to start with */
849 if (!(iomap->flags & IOMAP_F_SHARED))
851 /* don't bother with holes or unwritten extents */
852 if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
856 unsigned long offset = offset_in_page(pos);
857 unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
860 status = iomap_write_begin(inode, pos, bytes,
861 IOMAP_WRITE_F_UNSHARE, &page, iomap, srcmap);
862 if (unlikely(status))
865 status = iomap_write_end(inode, pos, bytes, bytes, page, iomap,
867 if (WARN_ON_ONCE(status == 0))
876 balance_dirty_pages_ratelimited(inode->i_mapping);
883 iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
884 const struct iomap_ops *ops)
889 ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
890 iomap_unshare_actor);
899 EXPORT_SYMBOL_GPL(iomap_file_unshare);
901 static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
902 unsigned bytes, struct iomap *iomap, struct iomap *srcmap)
907 status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap, srcmap);
911 zero_user(page, offset, bytes);
912 mark_page_accessed(page);
914 return iomap_write_end(inode, pos, bytes, bytes, page, iomap, srcmap);
918 iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
919 void *data, struct iomap *iomap, struct iomap *srcmap)
921 bool *did_zero = data;
925 /* already zeroed? we're done. */
926 if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
930 unsigned offset, bytes;
932 offset = offset_in_page(pos);
933 bytes = min_t(loff_t, PAGE_SIZE - offset, count);
936 status = dax_iomap_zero(pos, offset, bytes, iomap);
938 status = iomap_zero(inode, pos, offset, bytes, iomap,
954 iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
955 const struct iomap_ops *ops)
960 ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
961 ops, did_zero, iomap_zero_range_actor);
971 EXPORT_SYMBOL_GPL(iomap_zero_range);
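/*
 * iomap_truncate_page() zeroes the tail of the block that straddles the new
 * EOF. For example (illustrative numbers): with a 4096-byte block size,
 * pos = 6000 gives off = 6000 & 4095 = 1904, so the remaining
 * 4096 - 1904 = 2192 bytes of that block are zeroed; if pos is already
 * block aligned (off == 0) there is nothing to do.
 */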
974 iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
975 const struct iomap_ops *ops)
977 unsigned int blocksize = i_blocksize(inode);
978 unsigned int off = pos & (blocksize - 1);
980 /* Block boundary? Nothing to do */
983 return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
985 EXPORT_SYMBOL_GPL(iomap_truncate_page);
988 iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
989 void *data, struct iomap *iomap, struct iomap *srcmap)
991 struct page *page = data;
994 if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
995 ret = __block_write_begin_int(page, pos, length, NULL, iomap);
998 block_commit_write(page, 0, length);
1000 WARN_ON_ONCE(!PageUptodate(page));
1001 iomap_page_create(inode, page);
1002 set_page_dirty(page);
1008 vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
1010 struct page *page = vmf->page;
1011 struct inode *inode = file_inode(vmf->vma->vm_file);
1012 unsigned long length;
1017 ret = page_mkwrite_check_truncate(page, inode);
1022 offset = page_offset(page);
1023 while (length > 0) {
1024 ret = iomap_apply(inode, offset, length,
1025 IOMAP_WRITE | IOMAP_FAULT, ops, page,
1026 iomap_page_mkwrite_actor);
1027 if (unlikely(ret <= 0))
1033 wait_for_stable_page(page);
1034 return VM_FAULT_LOCKED;
1037 return block_page_mkwrite_return(ret);
1039 EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
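/*
 * Usage sketch (hypothetical): a filesystem's .page_mkwrite vm operation
 * usually just takes whatever fault-path lock it needs and forwards to this
 * helper, e.g. with a hypothetical example_iomap_ops:
 *
 *	static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		return iomap_page_mkwrite(vmf, &example_iomap_ops);
 *	}
 */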
1042 iomap_finish_page_writeback(struct inode *inode, struct page *page,
1043 int error, unsigned int len)
1045 struct iomap_page *iop = to_iomap_page(page);
1049 mapping_set_error(inode->i_mapping, -EIO);
1052 WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
1053 WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) <= 0);
1055 if (!iop || atomic_sub_and_test(len, &iop->write_bytes_pending))
1056 end_page_writeback(page);
1060 * We're now finished for good with this ioend structure. Update the page
1061 * state, release holds on bios, and finally free up memory. Do not use the ioend after this.
1065 iomap_finish_ioend(struct iomap_ioend *ioend, int error)
1067 struct inode *inode = ioend->io_inode;
1068 struct bio *bio = &ioend->io_inline_bio;
1069 struct bio *last = ioend->io_bio, *next;
1070 u64 start = bio->bi_iter.bi_sector;
1071 loff_t offset = ioend->io_offset;
1072 bool quiet = bio_flagged(bio, BIO_QUIET);
1074 for (bio = &ioend->io_inline_bio; bio; bio = next) {
1076 struct bvec_iter_all iter_all;
1079 * For the last bio, bi_private points to the ioend, so we
1080 * need to explicitly end the iteration here.
1085 next = bio->bi_private;
1087 /* walk each page on bio, ending page IO on them */
1088 bio_for_each_segment_all(bv, bio, iter_all)
1089 iomap_finish_page_writeback(inode, bv->bv_page, error,
1093 /* The ioend has been freed by bio_put() */
1095 if (unlikely(error && !quiet)) {
1096 printk_ratelimited(KERN_ERR
1097 "%s: writeback error on inode %lu, offset %lld, sector %llu",
1098 inode->i_sb->s_id, inode->i_ino, offset, start);
1103 iomap_finish_ioends(struct iomap_ioend *ioend, int error)
1105 struct list_head tmp;
1107 list_replace_init(&ioend->io_list, &tmp);
1108 iomap_finish_ioend(ioend, error);
1110 while (!list_empty(&tmp)) {
1111 ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
1112 list_del_init(&ioend->io_list);
1113 iomap_finish_ioend(ioend, error);
1116 EXPORT_SYMBOL_GPL(iomap_finish_ioends);
1119 * We can merge two adjacent ioends if they have the same set of work to do.
1122 iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
1124 if (ioend->io_bio->bi_status != next->io_bio->bi_status)
1126 if ((ioend->io_flags & IOMAP_F_SHARED) ^
1127 (next->io_flags & IOMAP_F_SHARED))
1129 if ((ioend->io_type == IOMAP_UNWRITTEN) ^
1130 (next->io_type == IOMAP_UNWRITTEN))
1132 if (ioend->io_offset + ioend->io_size != next->io_offset)
1138 iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends,
1139 void (*merge_private)(struct iomap_ioend *ioend,
1140 struct iomap_ioend *next))
1142 struct iomap_ioend *next;
1144 INIT_LIST_HEAD(&ioend->io_list);
1146 while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
1148 if (!iomap_ioend_can_merge(ioend, next))
1150 list_move_tail(&next->io_list, &ioend->io_list);
1151 ioend->io_size += next->io_size;
1152 if (next->io_private && merge_private)
1153 merge_private(ioend, next);
1156 EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
1159 iomap_ioend_compare(void *priv, struct list_head *a, struct list_head *b)
1161 struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
1162 struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);
1164 if (ia->io_offset < ib->io_offset)
1166 if (ia->io_offset > ib->io_offset)
1172 iomap_sort_ioends(struct list_head *ioend_list)
1174 list_sort(NULL, ioend_list, iomap_ioend_compare);
1176 EXPORT_SYMBOL_GPL(iomap_sort_ioends);
1178 static void iomap_writepage_end_bio(struct bio *bio)
1180 struct iomap_ioend *ioend = bio->bi_private;
1182 iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
1186 * Submit the final bio for an ioend.
1188 * If @error is non-zero, it means that we have a situation where some part of
1189 * the submission process has failed after we have marked pages for writeback
1190 * and unlocked them. In this situation, we need to fail the bio instead of
1191 * submitting it. This typically only happens on a filesystem shutdown.
1194 iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
1197 ioend->io_bio->bi_private = ioend;
1198 ioend->io_bio->bi_end_io = iomap_writepage_end_bio;
1200 if (wpc->ops->prepare_ioend)
1201 error = wpc->ops->prepare_ioend(ioend, error);
1204 * If we are failing the IO now, just mark the ioend with an
1205 * error and finish it. This will run IO completion immediately
1206 * as there is only one reference to the ioend at this point in time.
1209 ioend->io_bio->bi_status = errno_to_blk_status(error);
1210 bio_endio(ioend->io_bio);
1214 submit_bio(ioend->io_bio);
1218 static struct iomap_ioend *
1219 iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
1220 loff_t offset, sector_t sector, struct writeback_control *wbc)
1222 struct iomap_ioend *ioend;
1225 bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &iomap_ioend_bioset);
1226 bio_set_dev(bio, wpc->iomap.bdev);
1227 bio->bi_iter.bi_sector = sector;
1228 bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
1229 bio->bi_write_hint = inode->i_write_hint;
1230 wbc_init_bio(wbc, bio);
1232 ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
1233 INIT_LIST_HEAD(&ioend->io_list);
1234 ioend->io_type = wpc->iomap.type;
1235 ioend->io_flags = wpc->iomap.flags;
1236 ioend->io_inode = inode;
1238 ioend->io_offset = offset;
1239 ioend->io_private = NULL;
1240 ioend->io_bio = bio;
1245 * Allocate a new bio, and chain the old bio to the new one.
1247 * Note that we have to perform the chaining in this unintuitive order
1248 * so that the bi_private linkage is set up in the right direction for the
1249 * traversal in iomap_finish_ioend().
1252 iomap_chain_bio(struct bio *prev)
1256 new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
1257 bio_copy_dev(new, prev); /* also copies over blkcg information */
1258 new->bi_iter.bi_sector = bio_end_sector(prev);
1259 new->bi_opf = prev->bi_opf;
1260 new->bi_write_hint = prev->bi_write_hint;
1262 bio_chain(prev, new);
1263 bio_get(prev); /* for iomap_finish_ioend */
1269 iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
1272 if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
1273 (wpc->ioend->io_flags & IOMAP_F_SHARED))
1275 if (wpc->iomap.type != wpc->ioend->io_type)
1277 if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
1279 if (sector != bio_end_sector(wpc->ioend->io_bio))
1285 * Test to see if we have an existing ioend structure that we could append to
1286 * first, otherwise finish off the current ioend and start another.
1289 iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
1290 struct iomap_page *iop, struct iomap_writepage_ctx *wpc,
1291 struct writeback_control *wbc, struct list_head *iolist)
1293 sector_t sector = iomap_sector(&wpc->iomap, offset);
1294 unsigned len = i_blocksize(inode);
1295 unsigned poff = offset & (PAGE_SIZE - 1);
1296 bool merged, same_page = false;
1298 if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, offset, sector)) {
1300 list_add(&wpc->ioend->io_list, iolist);
1301 wpc->ioend = iomap_alloc_ioend(inode, wpc, offset, sector, wbc);
1304 merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff,
1307 atomic_add(len, &iop->write_bytes_pending);
1310 if (bio_full(wpc->ioend->io_bio, len)) {
1311 wpc->ioend->io_bio =
1312 iomap_chain_bio(wpc->ioend->io_bio);
1314 bio_add_page(wpc->ioend->io_bio, page, len, poff);
1317 wpc->ioend->io_size += len;
1318 wbc_account_cgroup_owner(wbc, page, len);
1322 * We implement an immediate ioend submission policy here to avoid needing to
1323 * chain multiple ioends and hence nest mempool allocations which can violate
1324 * forward progress guarantees we need to provide. The current ioend we are
1325 * adding blocks to is cached on the writepage context, and if the new block
1326 * does not append to the cached ioend, it will create a new ioend and cache that instead.
1329 * If a new ioend is created and cached, the old ioend is returned and queued
1330 * locally for submission once the entire page is processed or an error has been
1331 * detected. While ioends are submitted immediately after they are completed,
1332 * batching optimisations are provided by higher level block plugging.
1334 * At the end of a writeback pass, there will be a cached ioend remaining on the
1335 * writepage context that the caller will need to submit.
1338 iomap_writepage_map(struct iomap_writepage_ctx *wpc,
1339 struct writeback_control *wbc, struct inode *inode,
1340 struct page *page, u64 end_offset)
1342 struct iomap_page *iop = to_iomap_page(page);
1343 struct iomap_ioend *ioend, *next;
1344 unsigned len = i_blocksize(inode);
1345 u64 file_offset; /* file offset of page */
1346 int error = 0, count = 0, i;
1347 LIST_HEAD(submit_list);
1349 WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
1350 WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0);
1353 * Walk through the page to find areas to write back. If we run off the
1354 * end of the current map or find the current map invalid, grab a new
1357 for (i = 0, file_offset = page_offset(page);
1358 i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
1359 i++, file_offset += len) {
1360 if (iop && !test_bit(i, iop->uptodate))
1363 error = wpc->ops->map_blocks(wpc, inode, file_offset);
1366 if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
1368 if (wpc->iomap.type == IOMAP_HOLE)
1370 iomap_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
1375 WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
1376 WARN_ON_ONCE(!PageLocked(page));
1377 WARN_ON_ONCE(PageWriteback(page));
1380 * We cannot cancel the ioend directly here on error. We may have
1381 * already set other pages under writeback and hence we have to run I/O
1382 * completion to mark the error state of the pages under writeback appropriately.
1385 if (unlikely(error)) {
1388 * If the current page hasn't been added to ioend, it
1389 * won't be affected by I/O completions and we must
1390 * discard and unlock it right here.
1392 if (wpc->ops->discard_page)
1393 wpc->ops->discard_page(page);
1394 ClearPageUptodate(page);
1400 * If the page was not fully cleaned, we need to ensure that the
1401 * higher layers come back to it correctly. That means we need
1402 * to keep the page dirty, and for WB_SYNC_ALL writeback we need
1403 * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed
1404 * so another attempt to write this page in this writeback sweep will be made.
1407 set_page_writeback_keepwrite(page);
1409 clear_page_dirty_for_io(page);
1410 set_page_writeback(page);
1416 * Preserve the original error if there was one, otherwise catch
1417 * submission errors here and propagate into subsequent ioend
1420 list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
1423 list_del_init(&ioend->io_list);
1424 error2 = iomap_submit_ioend(wpc, ioend, error);
1425 if (error2 && !error)
1430 * We can end up here with no error and nothing to write only if we race
1431 * with a partial page truncate on a sub-page block sized filesystem.
1434 end_page_writeback(page);
1436 mapping_set_error(page->mapping, error);
1441 * Write out a dirty page.
1443 * For delalloc space on the page we need to allocate space and flush it.
1444 * For unwritten space on the page we need to start the conversion to
1445 * regular allocated space.
1448 iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
1450 struct iomap_writepage_ctx *wpc = data;
1451 struct inode *inode = page->mapping->host;
1456 trace_iomap_writepage(inode, page_offset(page), PAGE_SIZE);
1459 * Refuse to write the page out if we are called from reclaim context.
1461 * This avoids stack overflows when called from deeply used stacks in
1462 * random callers for direct reclaim or memcg reclaim. We explicitly
1463 * allow reclaim from kswapd as the stack usage there is relatively low.
1465 * This should never happen except in the case of a VM regression, so warn about it.
1468 if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
1473 * Given that we do not allow direct reclaim to call us, we should
1474 * never be called in a recursive filesystem reclaim context.
1476 if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
1480 * Is this page beyond the end of the file?
1482 * The page index is less than the end_index, adjust the end_offset
1483 * to the highest offset that this page should represent.
1484 * -----------------------------------------------------
1485 * | file mapping | <EOF> |
1486 * -----------------------------------------------------
1487 * | Page ... | Page N-2 | Page N-1 | Page N | |
1488 * ^--------------------------------^----------|--------
1489 * | desired writeback range | see else |
1490 * ---------------------------------^------------------|
1492 offset = i_size_read(inode);
1493 end_index = offset >> PAGE_SHIFT;
1494 if (page->index < end_index)
1495 end_offset = (loff_t)(page->index + 1) << PAGE_SHIFT;
1498 * Check whether the page to write out is beyond or straddles i_size.
1500 * -------------------------------------------------------
1501 * | file mapping | <EOF> |
1502 * -------------------------------------------------------
1503 * | Page ... | Page N-2 | Page N-1 | Page N | Beyond |
1504 * ^--------------------------------^-----------|---------
1506 * ---------------------------------^-----------|--------|
1508 unsigned offset_into_page = offset & (PAGE_SIZE - 1);
1511 * Skip the page if it is fully outside i_size, e.g. due to a
1512 * truncate operation that is in progress. We must redirty the
1513 * page so that reclaim stops reclaiming it. Otherwise
1514 * iomap_vm_releasepage() is called on it and gets confused.
1516 * Note that the end_index is unsigned long, it would overflow
1517 * if the given offset is greater than 16TB on 32-bit system
1518 * and if we do check the page is fully outside i_size or not
1519 * via "if (page->index >= end_index + 1)" as "end_index + 1"
1520 * will be evaluated to 0. Hence this page will be redirtied
1521 * and be written out repeatedly which would result in an
1522 * infinite loop, the user program that performs this operation
1523 * will hang. Instead, we can verify this situation by checking
1524 * if the page to write is totally beyond the i_size or if its
1525 * offset is just equal to the EOF.
1527 if (page->index > end_index ||
1528 (page->index == end_index && offset_into_page == 0))
1532 * The page straddles i_size. It must be zeroed out on each
1533 * and every writepage invocation because it may be mmapped.
1534 * "A file is mapped in multiples of the page size. For a file
1535 * that is not a multiple of the page size, the remaining
1536 * memory is zeroed when mapped, and writes to that region are
1537 * not written out to the file."
1539 zero_user_segment(page, offset_into_page, PAGE_SIZE);
1541 /* Adjust the end_offset to the end of file */
1542 end_offset = offset;
1545 return iomap_writepage_map(wpc, wbc, inode, page, end_offset);
1548 redirty_page_for_writepage(wbc, page);
1554 iomap_writepage(struct page *page, struct writeback_control *wbc,
1555 struct iomap_writepage_ctx *wpc,
1556 const struct iomap_writeback_ops *ops)
1561 ret = iomap_do_writepage(page, wbc, wpc);
1564 return iomap_submit_ioend(wpc, wpc->ioend, ret);
1566 EXPORT_SYMBOL_GPL(iomap_writepage);
1569 iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
1570 struct iomap_writepage_ctx *wpc,
1571 const struct iomap_writeback_ops *ops)
1576 ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
1579 return iomap_submit_ioend(wpc, wpc->ioend, ret);
1581 EXPORT_SYMBOL_GPL(iomap_writepages);
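/*
 * Usage sketch (hypothetical): a filesystem typically embeds struct
 * iomap_writepage_ctx in its own writeback context so its callbacks can
 * recover the outer structure with container_of(), and supplies
 * iomap_writeback_ops providing at least ->map_blocks():
 *
 *	struct example_writepage_ctx {
 *		struct iomap_writepage_ctx ctx;
 *		// filesystem-private state
 *	};
 *
 *	static int example_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		struct example_writepage_ctx wpc = { };
 *
 *		return iomap_writepages(mapping, wbc, &wpc.ctx,
 *				&example_writeback_ops);
 *	}
 */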
1583 static int __init iomap_init(void)
1585 return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
1586 offsetof(struct iomap_ioend, io_inline_bio),
1589 fs_initcall(iomap_init);