1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2010 Red Hat, Inc.
4 * Copyright (C) 2016-2019 Christoph Hellwig.
6 #include <linux/module.h>
7 #include <linux/compiler.h>
9 #include <linux/iomap.h>
10 #include <linux/pagemap.h>
11 #include <linux/uio.h>
12 #include <linux/buffer_head.h>
13 #include <linux/dax.h>
14 #include <linux/writeback.h>
15 #include <linux/list_sort.h>
16 #include <linux/swap.h>
17 #include <linux/bio.h>
18 #include <linux/sched/signal.h>
19 #include <linux/migrate.h>
22 #include "../internal.h"
25 * Structure allocated for each folio when block size < folio size
26 * to track sub-folio uptodate status and I/O completions.
29 atomic_t read_bytes_pending;
30 atomic_t write_bytes_pending;
31 spinlock_t uptodate_lock;
32 unsigned long uptodate[];
35 static inline struct iomap_page *to_iomap_page(struct folio *folio)
37 if (folio_test_private(folio))
38 return folio_get_private(folio);
42 static struct bio_set iomap_ioend_bioset;
44 static struct iomap_page *
45 iomap_page_create(struct inode *inode, struct folio *folio)
47 struct iomap_page *iop = to_iomap_page(folio);
48 unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
50 if (iop || nr_blocks <= 1)
53 iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
54 GFP_NOFS | __GFP_NOFAIL);
55 spin_lock_init(&iop->uptodate_lock);
56 if (folio_test_uptodate(folio))
57 bitmap_fill(iop->uptodate, nr_blocks);
58 folio_attach_private(folio, iop);
62 static void iomap_page_release(struct folio *folio)
64 struct iomap_page *iop = folio_detach_private(folio);
65 struct inode *inode = folio->mapping->host;
66 unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
70 WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
71 WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
72 WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
73 folio_test_uptodate(folio));
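/*
 * Worked example (not part of the original file): with a 4096-byte block
 * size and a 64KiB folio, i_blocks_per_folio() returns 16, so
 * iomap_page_create() allocates struct_size(iop, uptodate, BITS_TO_LONGS(16))
 * bytes -- one extra unsigned long of uptodate bits.  A single-page folio on
 * the same filesystem has nr_blocks == 1 and gets no iomap_page attached at
 * all.
 */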
78 * Calculate the range inside the folio that we actually need to read.
80 static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
81 loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
83 struct iomap_page *iop = to_iomap_page(folio);
84 loff_t orig_pos = *pos;
85 loff_t isize = i_size_read(inode);
86 unsigned block_bits = inode->i_blkbits;
87 unsigned block_size = (1 << block_bits);
88 size_t poff = offset_in_folio(folio, *pos);
89 size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
90 unsigned first = poff >> block_bits;
91 unsigned last = (poff + plen - 1) >> block_bits;
94	 * If the block size is smaller than the folio size, we need to check the
95 * per-block uptodate status and adjust the offset and length if needed
96 * to avoid reading in already uptodate ranges.
101 /* move forward for each leading block marked uptodate */
102 for (i = first; i <= last; i++) {
103 if (!test_bit(i, iop->uptodate))
111 /* truncate len if we find any trailing uptodate block(s) */
112 for ( ; i <= last; i++) {
113 if (test_bit(i, iop->uptodate)) {
114 plen -= (last - i + 1) * block_size;
122 * If the extent spans the block that contains the i_size, we need to
123 * handle both halves separately so that we properly zero data in the
124 * page cache for blocks that are entirely outside of i_size.
126 if (orig_pos <= isize && orig_pos + length > isize) {
127 unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;
129 if (first <= end && last > end)
130 plen -= (last - end) * block_size;
137 static void iomap_iop_set_range_uptodate(struct folio *folio,
138 struct iomap_page *iop, size_t off, size_t len)
140 struct inode *inode = folio->mapping->host;
141 unsigned first = off >> inode->i_blkbits;
142 unsigned last = (off + len - 1) >> inode->i_blkbits;
145 spin_lock_irqsave(&iop->uptodate_lock, flags);
146 bitmap_set(iop->uptodate, first, last - first + 1);
147 if (bitmap_full(iop->uptodate, i_blocks_per_folio(inode, folio)))
148 folio_mark_uptodate(folio);
149 spin_unlock_irqrestore(&iop->uptodate_lock, flags);
152 static void iomap_set_range_uptodate(struct folio *folio,
153 struct iomap_page *iop, size_t off, size_t len)
155 if (folio_test_error(folio))
159 iomap_iop_set_range_uptodate(folio, iop, off, len);
161 folio_mark_uptodate(folio);
164 static void iomap_finish_folio_read(struct folio *folio, size_t offset,
165 size_t len, int error)
167 struct iomap_page *iop = to_iomap_page(folio);
169 if (unlikely(error)) {
170 folio_clear_uptodate(folio);
171 folio_set_error(folio);
173 iomap_set_range_uptodate(folio, iop, offset, len);
176 if (!iop || atomic_sub_and_test(len, &iop->read_bytes_pending))
180 static void iomap_read_end_io(struct bio *bio)
182 int error = blk_status_to_errno(bio->bi_status);
183 struct folio_iter fi;
185 bio_for_each_folio_all(fi, bio)
186 iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
190 struct iomap_readpage_ctx {
191 struct folio *cur_folio;
192 bool cur_folio_in_bio;
194 struct readahead_control *rac;
198 * iomap_read_inline_data - copy inline data into the page cache
199 * @iter: iteration structure
200 * @folio: folio to copy to
202 * Copy the inline data in @iter into @folio and zero out the rest of the folio.
203 * Only a single IOMAP_INLINE extent is allowed at the end of each file.
204 * Returns zero for success to complete the read, or the usual negative errno.
206 static int iomap_read_inline_data(const struct iomap_iter *iter,
209 struct iomap_page *iop;
210 const struct iomap *iomap = iomap_iter_srcmap(iter);
211 size_t size = i_size_read(iter->inode) - iomap->offset;
212 size_t poff = offset_in_page(iomap->offset);
213 size_t offset = offset_in_folio(folio, iomap->offset);
216 if (folio_test_uptodate(folio))
219 if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
221 if (WARN_ON_ONCE(size > PAGE_SIZE -
222 offset_in_page(iomap->inline_data)))
224 if (WARN_ON_ONCE(size > iomap->length))
227 iop = iomap_page_create(iter->inode, folio);
229 iop = to_iomap_page(folio);
231 addr = kmap_local_folio(folio, offset);
232 memcpy(addr, iomap->inline_data, size);
233 memset(addr + size, 0, PAGE_SIZE - poff - size);
235 iomap_set_range_uptodate(folio, iop, offset, PAGE_SIZE - poff);
239 static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
242 const struct iomap *srcmap = iomap_iter_srcmap(iter);
244 return srcmap->type != IOMAP_MAPPED ||
245 (srcmap->flags & IOMAP_F_NEW) ||
246 pos >= i_size_read(iter->inode);
249 static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
250 struct iomap_readpage_ctx *ctx, loff_t offset)
252 const struct iomap *iomap = &iter->iomap;
253 loff_t pos = iter->pos + offset;
254 loff_t length = iomap_length(iter) - offset;
255 struct folio *folio = ctx->cur_folio;
256 struct iomap_page *iop;
257 loff_t orig_pos = pos;
261 if (iomap->type == IOMAP_INLINE)
262 return iomap_read_inline_data(iter, folio);
264 /* zero post-eof blocks as the page may be mapped */
265 iop = iomap_page_create(iter->inode, folio);
266 iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
270 if (iomap_block_needs_zeroing(iter, pos)) {
271 folio_zero_range(folio, poff, plen);
272 iomap_set_range_uptodate(folio, iop, poff, plen);
276 ctx->cur_folio_in_bio = true;
278 atomic_add(plen, &iop->read_bytes_pending);
280 sector = iomap_sector(iomap, pos);
282 bio_end_sector(ctx->bio) != sector ||
283 !bio_add_folio(ctx->bio, folio, plen, poff)) {
284 gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
285 gfp_t orig_gfp = gfp;
286 unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
289 submit_bio(ctx->bio);
291 if (ctx->rac) /* same as readahead_gfp_mask */
292 gfp |= __GFP_NORETRY | __GFP_NOWARN;
293 ctx->bio = bio_alloc(gfp, bio_max_segs(nr_vecs));
295 * If the bio_alloc fails, try it again for a single page to
296 * avoid having to deal with partial page reads. This emulates
297 * what do_mpage_readpage does.
300 ctx->bio = bio_alloc(orig_gfp, 1);
301 ctx->bio->bi_opf = REQ_OP_READ;
303 ctx->bio->bi_opf |= REQ_RAHEAD;
304 ctx->bio->bi_iter.bi_sector = sector;
305 bio_set_dev(ctx->bio, iomap->bdev);
306 ctx->bio->bi_end_io = iomap_read_end_io;
307 bio_add_folio(ctx->bio, folio, plen, poff);
312 * Move the caller beyond our range so that it keeps making progress.
313 * For that, we have to include any leading non-uptodate ranges, but
314	 * we can skip trailing ones as they will be handled in the next iteration.
317 return pos - orig_pos + plen;
321 iomap_readpage(struct page *page, const struct iomap_ops *ops)
323 struct folio *folio = page_folio(page);
324 struct iomap_iter iter = {
325 .inode = folio->mapping->host,
326 .pos = folio_pos(folio),
327 .len = folio_size(folio),
329 struct iomap_readpage_ctx ctx = {
334 trace_iomap_readpage(iter.inode, 1);
336 while ((ret = iomap_iter(&iter, ops)) > 0)
337 iter.processed = iomap_readpage_iter(&iter, &ctx, 0);
340 folio_set_error(folio);
344 WARN_ON_ONCE(!ctx.cur_folio_in_bio);
346 WARN_ON_ONCE(ctx.cur_folio_in_bio);
351 * Just like mpage_readahead and block_read_full_page, we always
352 * return 0 and just mark the page as PageError on errors. This
353 * should be cleaned up throughout the stack eventually.
357 EXPORT_SYMBOL_GPL(iomap_readpage);
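/*
 * Usage sketch, not part of the original file: how a filesystem might wire
 * ->readpage through iomap_readpage().  Everything prefixed "example_" is
 * hypothetical; example_iomap_begin() fakes a flat 1:1 mapping, whereas a
 * real filesystem would consult its extent map here and may also supply an
 * ->iomap_end method.
 */
static int example_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
		unsigned flags, struct iomap *iomap, struct iomap *srcmap)
{
	/* Pretend the whole range is one contiguous mapped extent. */
	iomap->type = IOMAP_MAPPED;
	iomap->offset = pos;
	iomap->length = length;
	iomap->addr = pos;		/* disk address in bytes */
	iomap->bdev = inode->i_sb->s_bdev;
	return 0;
}

static const struct iomap_ops example_iomap_ops = {
	.iomap_begin	= example_iomap_begin,
};

static int example_readpage(struct file *unused, struct page *page)
{
	return iomap_readpage(page, &example_iomap_ops);
}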
359 static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
360 struct iomap_readpage_ctx *ctx)
362 loff_t length = iomap_length(iter);
365 for (done = 0; done < length; done += ret) {
366 if (ctx->cur_folio &&
367 offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) {
368 if (!ctx->cur_folio_in_bio)
369 folio_unlock(ctx->cur_folio);
370 ctx->cur_folio = NULL;
372 if (!ctx->cur_folio) {
373 ctx->cur_folio = readahead_folio(ctx->rac);
374 ctx->cur_folio_in_bio = false;
376 ret = iomap_readpage_iter(iter, ctx, done);
385 * iomap_readahead - Attempt to read pages from a file.
386 * @rac: Describes the pages to be read.
387 * @ops: The operations vector for the filesystem.
389 * This function is for filesystems to call to implement their readahead
390 * address_space operation.
392 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
393 * blocks from disc), and may wait for it. The caller may be trying to
394 * access a different page, and so sleeping excessively should be avoided.
395 * It may allocate memory, but should avoid costly allocations. This
396 * function is called with memalloc_nofs set, so allocations will not cause
397 * the filesystem to be reentered.
399 void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
401 struct iomap_iter iter = {
402 .inode = rac->mapping->host,
403 .pos = readahead_pos(rac),
404 .len = readahead_length(rac),
406 struct iomap_readpage_ctx ctx = {
410 trace_iomap_readahead(rac->mapping->host, readahead_count(rac));
412 while (iomap_iter(&iter, ops) > 0)
413 iter.processed = iomap_readahead_iter(&iter, &ctx);
418 if (!ctx.cur_folio_in_bio)
419 folio_unlock(ctx.cur_folio);
422 EXPORT_SYMBOL_GPL(iomap_readahead);
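/*
 * Continuing the hypothetical example above: ->readahead is wired up the
 * same way, and several of the exported helpers below can be used directly
 * as address_space operations.  Writeback methods are added further down
 * with iomap_writepage()/iomap_writepages().
 */
static void example_readahead(struct readahead_control *rac)
{
	iomap_readahead(rac, &example_iomap_ops);
}

static const struct address_space_operations example_aops = {
	.readpage		= example_readpage,
	.readahead		= example_readahead,
	.releasepage		= iomap_releasepage,
	.invalidatepage		= iomap_invalidatepage,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
#ifdef CONFIG_MIGRATION
	.migratepage		= iomap_migrate_page,
#endif
};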
425 * iomap_is_partially_uptodate checks whether blocks within a page are uptodate.
428 * Returns true if all blocks which correspond to a file portion
429 * we want to read within the page are uptodate.
432 iomap_is_partially_uptodate(struct page *page, unsigned long from,
435 struct folio *folio = page_folio(page);
436 struct iomap_page *iop = to_iomap_page(folio);
437 struct inode *inode = page->mapping->host;
438 unsigned len, first, last;
441 /* Limit range to one page */
442 len = min_t(unsigned, PAGE_SIZE - from, count);
444 /* First and last blocks in range within page */
445 first = from >> inode->i_blkbits;
446 last = (from + len - 1) >> inode->i_blkbits;
449 for (i = first; i <= last; i++)
450 if (!test_bit(i, iop->uptodate))
457 EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
460 iomap_releasepage(struct page *page, gfp_t gfp_mask)
462 struct folio *folio = page_folio(page);
464 trace_iomap_releasepage(folio->mapping->host, folio_pos(folio),
468 * mm accommodates an old ext3 case where clean pages might not have had
469 * the dirty bit cleared. Thus, it can send actual dirty pages to
470 * ->releasepage() via shrink_active_list(); skip those here.
472 if (folio_test_dirty(folio) || folio_test_writeback(folio))
474 iomap_page_release(folio);
477 EXPORT_SYMBOL_GPL(iomap_releasepage);
479 void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
481 trace_iomap_invalidatepage(folio->mapping->host, offset, len);
484 * If we're invalidating the entire folio, clear the dirty state
485 * from it and release it to avoid unnecessary buildup of the LRU.
487 if (offset == 0 && len == folio_size(folio)) {
488 WARN_ON_ONCE(folio_test_writeback(folio));
489 folio_cancel_dirty(folio);
490 iomap_page_release(folio);
491 } else if (folio_test_large(folio)) {
492 /* Must release the iop so the page can be split */
493 WARN_ON_ONCE(!folio_test_uptodate(folio) &&
494 folio_test_dirty(folio));
495 iomap_page_release(folio);
498 EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
500 void iomap_invalidatepage(struct page *page, unsigned int offset,
503 iomap_invalidate_folio(page_folio(page), offset, len);
505 EXPORT_SYMBOL_GPL(iomap_invalidatepage);
507 #ifdef CONFIG_MIGRATION
509 iomap_migrate_page(struct address_space *mapping, struct page *newpage,
510 struct page *page, enum migrate_mode mode)
512 struct folio *folio = page_folio(page);
513 struct folio *newfolio = page_folio(newpage);
516 ret = folio_migrate_mapping(mapping, newfolio, folio, 0);
517 if (ret != MIGRATEPAGE_SUCCESS)
520 if (folio_test_private(folio))
521 folio_attach_private(newfolio, folio_detach_private(folio));
523 if (mode != MIGRATE_SYNC_NO_COPY)
524 folio_migrate_copy(newfolio, folio);
526 folio_migrate_flags(newfolio, folio);
527 return MIGRATEPAGE_SUCCESS;
529 EXPORT_SYMBOL_GPL(iomap_migrate_page);
530 #endif /* CONFIG_MIGRATION */
533 iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
535 loff_t i_size = i_size_read(inode);
538	 * Only truncate newly allocated pages beyond EOF, even if the
539 * write started inside the existing inode size.
541 if (pos + len > i_size)
542 truncate_pagecache_range(inode, max(pos, i_size), pos + len);
545 static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
546 size_t poff, size_t plen, const struct iomap *iomap)
551 bio_init(&bio, &bvec, 1);
552 bio.bi_opf = REQ_OP_READ;
553 bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
554 bio_set_dev(&bio, iomap->bdev);
555 bio_add_folio(&bio, folio, plen, poff);
556 return submit_bio_wait(&bio);
559 static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
560 size_t len, struct folio *folio)
562 const struct iomap *srcmap = iomap_iter_srcmap(iter);
563 struct iomap_page *iop = iomap_page_create(iter->inode, folio);
564 loff_t block_size = i_blocksize(iter->inode);
565 loff_t block_start = round_down(pos, block_size);
566 loff_t block_end = round_up(pos + len, block_size);
567 size_t from = offset_in_folio(folio, pos), to = from + len;
570 if (folio_test_uptodate(folio))
572 folio_clear_error(folio);
575 iomap_adjust_read_range(iter->inode, folio, &block_start,
576 block_end - block_start, &poff, &plen);
580 if (!(iter->flags & IOMAP_UNSHARE) &&
581 (from <= poff || from >= poff + plen) &&
582 (to <= poff || to >= poff + plen))
585 if (iomap_block_needs_zeroing(iter, block_start)) {
586 if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
588 folio_zero_segments(folio, poff, from, to, poff + plen);
590 int status = iomap_read_folio_sync(block_start, folio,
595 iomap_set_range_uptodate(folio, iop, poff, plen);
596 } while ((block_start += plen) < block_end);
601 static int iomap_write_begin_inline(const struct iomap_iter *iter,
604 /* needs more work for the tailpacking case; disable for now */
605 if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
607 return iomap_read_inline_data(iter, folio);
610 static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
611 size_t len, struct folio **foliop)
613 const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
614 const struct iomap *srcmap = iomap_iter_srcmap(iter);
616 unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS;
619 BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
620 if (srcmap != &iter->iomap)
621 BUG_ON(pos + len > srcmap->offset + srcmap->length);
623 if (fatal_signal_pending(current))
626 if (!mapping_large_folio_support(iter->inode->i_mapping))
627 len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
629 if (page_ops && page_ops->page_prepare) {
630 status = page_ops->page_prepare(iter->inode, pos, len);
635 folio = __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
636 fgp, mapping_gfp_mask(iter->inode->i_mapping));
641 if (pos + len > folio_pos(folio) + folio_size(folio))
642 len = folio_pos(folio) + folio_size(folio) - pos;
644 if (srcmap->type == IOMAP_INLINE)
645 status = iomap_write_begin_inline(iter, folio);
646 else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
647 status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
649 status = __iomap_write_begin(iter, pos, len, folio);
651 if (unlikely(status))
660 iomap_write_failed(iter->inode, pos, len);
663 if (page_ops && page_ops->page_done)
664 page_ops->page_done(iter->inode, pos, 0, NULL);
668 static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
669 size_t copied, struct folio *folio)
671 struct iomap_page *iop = to_iomap_page(folio);
672 flush_dcache_folio(folio);
675 * The blocks that were entirely written will now be uptodate, so we
676 * don't have to worry about a readpage reading them and overwriting a
677 * partial write. However, if we've encountered a short write and only
678 * partially written into a block, it will not be marked uptodate, so a
679 * readpage might come in and destroy our partial write.
681 * Do the simplest thing and just treat any short write to a
682 * non-uptodate page as a zero-length write, and force the caller to
683 * redo the whole thing.
685 if (unlikely(copied < len && !folio_test_uptodate(folio)))
687 iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len);
688 filemap_dirty_folio(inode->i_mapping, folio);
692 static size_t iomap_write_end_inline(const struct iomap_iter *iter,
693 struct folio *folio, loff_t pos, size_t copied)
695 const struct iomap *iomap = &iter->iomap;
698 WARN_ON_ONCE(!folio_test_uptodate(folio));
699 BUG_ON(!iomap_inline_data_valid(iomap));
701 flush_dcache_folio(folio);
702 addr = kmap_local_folio(folio, pos);
703 memcpy(iomap_inline_data(iomap, pos), addr, copied);
706 mark_inode_dirty(iter->inode);
710 /* Returns the number of bytes copied. May be 0. Cannot be an errno. */
711 static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
712 size_t copied, struct folio *folio)
714 const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
715 const struct iomap *srcmap = iomap_iter_srcmap(iter);
716 loff_t old_size = iter->inode->i_size;
719 if (srcmap->type == IOMAP_INLINE) {
720 ret = iomap_write_end_inline(iter, folio, pos, copied);
721 } else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
722 ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
723 copied, &folio->page, NULL);
725 ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
729 * Update the in-memory inode size after copying the data into the page
730 * cache. It's up to the file system to write the updated size to disk,
731 * preferably after I/O completion so that no stale data is exposed.
733 if (pos + ret > old_size) {
734 i_size_write(iter->inode, pos + ret);
735 iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
740 pagecache_isize_extended(iter->inode, old_size, pos);
741 if (page_ops && page_ops->page_done)
742 page_ops->page_done(iter->inode, pos, ret, &folio->page);
746 iomap_write_failed(iter->inode, pos, len);
750 static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
752 loff_t length = iomap_length(iter);
753 loff_t pos = iter->pos;
760 unsigned long offset; /* Offset into pagecache page */
761 unsigned long bytes; /* Bytes to write to page */
762 size_t copied; /* Bytes copied from user */
764 offset = offset_in_page(pos);
765 bytes = min_t(unsigned long, PAGE_SIZE - offset,
772 * Bring in the user page that we'll copy from _first_.
773 * Otherwise there's a nasty deadlock on copying from the
774		 * same page as we're writing to, without it being marked up-to-date.
777 if (unlikely(fault_in_iov_iter_readable(i, bytes))) {
782 status = iomap_write_begin(iter, pos, bytes, &folio);
783 if (unlikely(status))
786 page = folio_file_page(folio, pos >> PAGE_SHIFT);
787 if (mapping_writably_mapped(iter->inode->i_mapping))
788 flush_dcache_page(page);
790 copied = copy_page_from_iter_atomic(page, offset, bytes, i);
792 status = iomap_write_end(iter, pos, bytes, copied, folio);
794 if (unlikely(copied != status))
795 iov_iter_revert(i, copied - status);
798 if (unlikely(status == 0)) {
800 * A short copy made iomap_write_end() reject the
801 * thing entirely. Might be memory poisoning
802 * halfway through, might be a race with munmap,
803 * might be severe memory pressure.
813 balance_dirty_pages_ratelimited(iter->inode->i_mapping);
814 } while (iov_iter_count(i) && length);
816 return written ? written : status;
820 iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
821 const struct iomap_ops *ops)
823 struct iomap_iter iter = {
824 .inode = iocb->ki_filp->f_mapping->host,
826 .len = iov_iter_count(i),
827 .flags = IOMAP_WRITE,
831 while ((ret = iomap_iter(&iter, ops)) > 0)
832 iter.processed = iomap_write_iter(&iter, i);
833 if (iter.pos == iocb->ki_pos)
835 return iter.pos - iocb->ki_pos;
837 EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
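/*
 * Usage sketch (hypothetical): a buffered ->write_iter built on
 * iomap_file_buffered_write().  A real filesystem would also handle
 * O_DIRECT, quotas, and its own locking model here.
 */
static ssize_t example_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = iomap_file_buffered_write(iocb, from, &example_iomap_ops);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}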
839 static loff_t iomap_unshare_iter(struct iomap_iter *iter)
841 struct iomap *iomap = &iter->iomap;
842 const struct iomap *srcmap = iomap_iter_srcmap(iter);
843 loff_t pos = iter->pos;
844 loff_t length = iomap_length(iter);
848 /* don't bother with blocks that are not shared to start with */
849 if (!(iomap->flags & IOMAP_F_SHARED))
851 /* don't bother with holes or unwritten extents */
852 if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
856 unsigned long offset = offset_in_page(pos);
857 unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
860 status = iomap_write_begin(iter, pos, bytes, &folio);
861 if (unlikely(status))
864 status = iomap_write_end(iter, pos, bytes, bytes, folio);
865 if (WARN_ON_ONCE(status == 0))
874 balance_dirty_pages_ratelimited(iter->inode->i_mapping);
881 iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
882 const struct iomap_ops *ops)
884 struct iomap_iter iter = {
888 .flags = IOMAP_WRITE | IOMAP_UNSHARE,
892 while ((ret = iomap_iter(&iter, ops)) > 0)
893 iter.processed = iomap_unshare_iter(&iter);
896 EXPORT_SYMBOL_GPL(iomap_file_unshare);
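/*
 * Usage sketch (hypothetical): breaking shared (reflinked) extents ahead of
 * writes that must not trigger copy-on-write later, in the style of an
 * FALLOC_FL_UNSHARE_RANGE implementation.
 */
static int example_unshare_range(struct inode *inode, loff_t pos, loff_t len)
{
	int error;

	error = iomap_file_unshare(inode, pos, len, &example_iomap_ops);
	if (error)
		return error;

	/* Push the newly written copies out so the unshare is durable. */
	return filemap_write_and_wait_range(inode->i_mapping, pos,
			pos + len - 1);
}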
898 static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
900 struct iomap *iomap = &iter->iomap;
901 const struct iomap *srcmap = iomap_iter_srcmap(iter);
902 loff_t pos = iter->pos;
903 loff_t length = iomap_length(iter);
906 /* already zeroed? we're done. */
907 if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
914 size_t bytes = min_t(u64, SIZE_MAX, length);
916 if (IS_DAX(iter->inode)) {
917 s64 tmp = dax_iomap_zero(pos, bytes, iomap);
924 status = iomap_write_begin(iter, pos, bytes, &folio);
928 offset = offset_in_folio(folio, pos);
929 if (bytes > folio_size(folio) - offset)
930 bytes = folio_size(folio) - offset;
932 folio_zero_range(folio, offset, bytes);
933 folio_mark_accessed(folio);
935 bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
937 if (WARN_ON_ONCE(bytes == 0))
945 } while (length > 0);
951 iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
952 const struct iomap_ops *ops)
954 struct iomap_iter iter = {
962 while ((ret = iomap_iter(&iter, ops)) > 0)
963 iter.processed = iomap_zero_iter(&iter, did_zero);
966 EXPORT_SYMBOL_GPL(iomap_zero_range);
969 iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
970 const struct iomap_ops *ops)
972 unsigned int blocksize = i_blocksize(inode);
973 unsigned int off = pos & (blocksize - 1);
975 /* Block boundary? Nothing to do */
978 return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
980 EXPORT_SYMBOL_GPL(iomap_truncate_page);
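/*
 * Usage sketch (hypothetical): zero the new partial tail block when a file
 * shrinks, before the size change is made visible, so mmap readers never
 * see stale bytes beyond the new EOF.
 */
static int example_shrink_file(struct inode *inode, loff_t newsize)
{
	bool did_zero = false;
	int error;

	error = iomap_truncate_page(inode, newsize, &did_zero,
			&example_iomap_ops);
	if (error)
		return error;

	truncate_setsize(inode, newsize);
	return 0;
}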
982 static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter,
985 loff_t length = iomap_length(iter);
988 if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
989 ret = __block_write_begin_int(folio, iter->pos, length, NULL,
993 block_commit_write(&folio->page, 0, length);
995 WARN_ON_ONCE(!folio_test_uptodate(folio));
996 folio_mark_dirty(folio);
1002 vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
1004 struct iomap_iter iter = {
1005 .inode = file_inode(vmf->vma->vm_file),
1006 .flags = IOMAP_WRITE | IOMAP_FAULT,
1008 struct folio *folio = page_folio(vmf->page);
1012 ret = folio_mkwrite_check_truncate(folio, iter.inode);
1015 iter.pos = folio_pos(folio);
1017 while ((ret = iomap_iter(&iter, ops)) > 0)
1018 iter.processed = iomap_folio_mkwrite_iter(&iter, folio);
1022 folio_wait_stable(folio);
1023 return VM_FAULT_LOCKED;
1025 folio_unlock(folio);
1026 return block_page_mkwrite_return(ret);
1028 EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
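/*
 * Usage sketch (hypothetical): the ->page_mkwrite side of a shared writable
 * mapping, letting iomap_page_mkwrite() allocate and stabilise blocks under
 * the locked folio before the write fault is allowed to proceed.
 */
static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vmf->vma->vm_file);
	ret = iomap_page_mkwrite(vmf, &example_iomap_ops);
	sb_end_pagefault(inode->i_sb);
	return ret;
}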
1030 static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
1031 size_t len, int error)
1033 struct iomap_page *iop = to_iomap_page(folio);
1036 folio_set_error(folio);
1037 mapping_set_error(inode->i_mapping, error);
1040 WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !iop);
1041 WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) <= 0);
1043 if (!iop || atomic_sub_and_test(len, &iop->write_bytes_pending))
1044 folio_end_writeback(folio);
1048 * We're now finished for good with this ioend structure. Update the page
1049 * state, release holds on bios, and finally free up memory.  Do not use the
1050 * ioend after this.
1053 iomap_finish_ioend(struct iomap_ioend *ioend, int error)
1055 struct inode *inode = ioend->io_inode;
1056 struct bio *bio = &ioend->io_inline_bio;
1057 struct bio *last = ioend->io_bio, *next;
1058 u64 start = bio->bi_iter.bi_sector;
1059 loff_t offset = ioend->io_offset;
1060 bool quiet = bio_flagged(bio, BIO_QUIET);
1062 for (bio = &ioend->io_inline_bio; bio; bio = next) {
1063 struct folio_iter fi;
1066 * For the last bio, bi_private points to the ioend, so we
1067 * need to explicitly end the iteration here.
1072 next = bio->bi_private;
1074 /* walk all folios in bio, ending page IO on them */
1075 bio_for_each_folio_all(fi, bio)
1076 iomap_finish_folio_write(inode, fi.folio, fi.length,
1080 /* The ioend has been freed by bio_put() */
1082 if (unlikely(error && !quiet)) {
1083 printk_ratelimited(KERN_ERR
1084 "%s: writeback error on inode %lu, offset %lld, sector %llu",
1085 inode->i_sb->s_id, inode->i_ino, offset, start);
1090 iomap_finish_ioends(struct iomap_ioend *ioend, int error)
1092 struct list_head tmp;
1094 list_replace_init(&ioend->io_list, &tmp);
1095 iomap_finish_ioend(ioend, error);
1097 while (!list_empty(&tmp)) {
1098 ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
1099 list_del_init(&ioend->io_list);
1100 iomap_finish_ioend(ioend, error);
1103 EXPORT_SYMBOL_GPL(iomap_finish_ioends);
1106 * We can merge two adjacent ioends if they have the same set of work to do.
1109 iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
1111 if (ioend->io_bio->bi_status != next->io_bio->bi_status)
1113 if ((ioend->io_flags & IOMAP_F_SHARED) ^
1114 (next->io_flags & IOMAP_F_SHARED))
1116 if ((ioend->io_type == IOMAP_UNWRITTEN) ^
1117 (next->io_type == IOMAP_UNWRITTEN))
1119 if (ioend->io_offset + ioend->io_size != next->io_offset)
1125 iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends)
1127 struct iomap_ioend *next;
1129 INIT_LIST_HEAD(&ioend->io_list);
1131 while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
1133 if (!iomap_ioend_can_merge(ioend, next))
1135 list_move_tail(&next->io_list, &ioend->io_list);
1136 ioend->io_size += next->io_size;
1139 EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
1142 iomap_ioend_compare(void *priv, const struct list_head *a,
1143 const struct list_head *b)
1145 struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
1146 struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);
1148 if (ia->io_offset < ib->io_offset)
1150 if (ia->io_offset > ib->io_offset)
1156 iomap_sort_ioends(struct list_head *ioend_list)
1158 list_sort(NULL, ioend_list, iomap_ioend_compare);
1160 EXPORT_SYMBOL_GPL(iomap_sort_ioends);
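/*
 * Usage sketch (hypothetical): a filesystem that queues ioends to a
 * workqueue for completion can sort and merge them first so that each
 * on-disk size update or unwritten conversion covers the largest possible
 * contiguous range.
 */
static void example_finish_ioend_list(struct list_head *ioends, int error)
{
	struct iomap_ioend *ioend;

	iomap_sort_ioends(ioends);
	while ((ioend = list_first_entry_or_null(ioends, struct iomap_ioend,
			io_list))) {
		list_del_init(&ioend->io_list);
		iomap_ioend_try_merge(ioend, ioends);
		iomap_finish_ioends(ioend, error);
	}
}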
1162 static void iomap_writepage_end_bio(struct bio *bio)
1164 struct iomap_ioend *ioend = bio->bi_private;
1166 iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
1170 * Submit the final bio for an ioend.
1172 * If @error is non-zero, it means that we have a situation where some part of
1173 * the submission process has failed after we've marked pages for writeback
1174 * and unlocked them. In this situation, we need to fail the bio instead of
1175 * submitting it. This typically only happens on a filesystem shutdown.
1178 iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
1181 ioend->io_bio->bi_private = ioend;
1182 ioend->io_bio->bi_end_io = iomap_writepage_end_bio;
1184 if (wpc->ops->prepare_ioend)
1185 error = wpc->ops->prepare_ioend(ioend, error);
1188 * If we're failing the IO now, just mark the ioend with an
1189 * error and finish it. This will run IO completion immediately
1190		 * as there is only one reference to the ioend at this point in time.
1193 ioend->io_bio->bi_status = errno_to_blk_status(error);
1194 bio_endio(ioend->io_bio);
1198 submit_bio(ioend->io_bio);
1202 static struct iomap_ioend *
1203 iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
1204 loff_t offset, sector_t sector, struct writeback_control *wbc)
1206 struct iomap_ioend *ioend;
1209 bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS, &iomap_ioend_bioset);
1210 bio_set_dev(bio, wpc->iomap.bdev);
1211 bio->bi_iter.bi_sector = sector;
1212 bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
1213 bio->bi_write_hint = inode->i_write_hint;
1214 wbc_init_bio(wbc, bio);
1216 ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
1217 INIT_LIST_HEAD(&ioend->io_list);
1218 ioend->io_type = wpc->iomap.type;
1219 ioend->io_flags = wpc->iomap.flags;
1220 ioend->io_inode = inode;
1222 ioend->io_offset = offset;
1223 ioend->io_bio = bio;
1228 * Allocate a new bio, and chain the old bio to the new one.
1230 * Note that we have to perform the chaining in this unintuitive order
1231 * so that the bi_private linkage is set up in the right direction for the
1232 * traversal in iomap_finish_ioend().
1235 iomap_chain_bio(struct bio *prev)
1239 new = bio_alloc(GFP_NOFS, BIO_MAX_VECS);
1240 bio_copy_dev(new, prev);/* also copies over blkcg information */
1241 new->bi_iter.bi_sector = bio_end_sector(prev);
1242 new->bi_opf = prev->bi_opf;
1243 new->bi_write_hint = prev->bi_write_hint;
1245 bio_chain(prev, new);
1246 bio_get(prev); /* for iomap_finish_ioend */
1252 iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
1255 if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
1256 (wpc->ioend->io_flags & IOMAP_F_SHARED))
1258 if (wpc->iomap.type != wpc->ioend->io_type)
1260 if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
1262 if (sector != bio_end_sector(wpc->ioend->io_bio))
1268 * Test to see if we have an existing ioend structure that we could append to
1269 * first; otherwise finish off the current ioend and start another.
1272 iomap_add_to_ioend(struct inode *inode, loff_t pos, struct folio *folio,
1273 struct iomap_page *iop, struct iomap_writepage_ctx *wpc,
1274 struct writeback_control *wbc, struct list_head *iolist)
1276 sector_t sector = iomap_sector(&wpc->iomap, pos);
1277 unsigned len = i_blocksize(inode);
1278 size_t poff = offset_in_folio(folio, pos);
1280 if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, sector)) {
1282 list_add(&wpc->ioend->io_list, iolist);
1283 wpc->ioend = iomap_alloc_ioend(inode, wpc, pos, sector, wbc);
1286 if (!bio_add_folio(wpc->ioend->io_bio, folio, len, poff)) {
1287 wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio);
1288 bio_add_folio(wpc->ioend->io_bio, folio, len, poff);
1292 atomic_add(len, &iop->write_bytes_pending);
1293 wpc->ioend->io_size += len;
1294 wbc_account_cgroup_owner(wbc, &folio->page, len);
1298 * We implement an immediate ioend submission policy here to avoid needing to
1299 * chain multiple ioends and hence nest mempool allocations which can violate
1300 * the forward progress guarantees we need to provide. The current ioend we're
1301 * adding blocks to is cached in the writepage context, and if the new block
1302 * doesn't append to the cached ioend, it will create a new ioend and cache that instead.
1305 * If a new ioend is created and cached, the old ioend is returned and queued
1306 * locally for submission once the entire page is processed or an error has been
1307 * detected. While ioends are submitted immediately after they are completed,
1308 * batching optimisations are provided by higher level block plugging.
1310 * At the end of a writeback pass, there will be a cached ioend remaining on the
1311 * writepage context that the caller will need to submit.
1314 iomap_writepage_map(struct iomap_writepage_ctx *wpc,
1315 struct writeback_control *wbc, struct inode *inode,
1316 struct folio *folio, u64 end_pos)
1318 struct iomap_page *iop = iomap_page_create(inode, folio);
1319 struct iomap_ioend *ioend, *next;
1320 unsigned len = i_blocksize(inode);
1321 unsigned nblocks = i_blocks_per_folio(inode, folio);
1322 u64 pos = folio_pos(folio);
1323 int error = 0, count = 0, i;
1324 LIST_HEAD(submit_list);
1326 WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0);
1329 * Walk through the folio to find areas to write back. If we
1330 * run off the end of the current map or find the current map
1331 * invalid, grab a new one.
1333 for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) {
1334 if (iop && !test_bit(i, iop->uptodate))
1337 error = wpc->ops->map_blocks(wpc, inode, pos);
1340 if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
1342 if (wpc->iomap.type == IOMAP_HOLE)
1344 iomap_add_to_ioend(inode, pos, folio, iop, wpc, wbc,
1349 WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
1350 WARN_ON_ONCE(!folio_test_locked(folio));
1351 WARN_ON_ONCE(folio_test_writeback(folio));
1352 WARN_ON_ONCE(folio_test_dirty(folio));
1355 * We cannot cancel the ioend directly here on error. We may have
1356 * already set other pages under writeback and hence we have to run I/O
1357	 * completion to mark the error state of the pages under writeback appropriately.
1360 if (unlikely(error)) {
1362 * Let the filesystem know what portion of the current page
1363 * failed to map. If the page hasn't been added to ioend, it
1364		 * won't be affected by I/O completion and we must unlock it now.
1367 if (wpc->ops->discard_folio)
1368 wpc->ops->discard_folio(folio, pos);
1370 folio_clear_uptodate(folio);
1371 folio_unlock(folio);
1376 folio_start_writeback(folio);
1377 folio_unlock(folio);
1380 * Preserve the original error if there was one; catch
1381 * submission errors here and propagate into subsequent ioend
1384 list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
1387 list_del_init(&ioend->io_list);
1388 error2 = iomap_submit_ioend(wpc, ioend, error);
1389 if (error2 && !error)
1394 * We can end up here with no error and nothing to write only if we race
1395 * with a partial page truncate on a sub-page block sized filesystem.
1398 folio_end_writeback(folio);
1400 mapping_set_error(folio->mapping, error);
1405 * Write out a dirty page.
1407 * For delalloc space on the page, we need to allocate space and flush it.
1408 * For unwritten space on the page, we need to start the conversion to
1409 * regular allocated space.
1412 iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
1414 struct folio *folio = page_folio(page);
1415 struct iomap_writepage_ctx *wpc = data;
1416 struct inode *inode = folio->mapping->host;
1419 trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio));
1422 * Refuse to write the folio out if we're called from reclaim context.
1424 * This avoids stack overflows when called from deeply used stacks in
1425 * random callers for direct reclaim or memcg reclaim. We explicitly
1426 * allow reclaim from kswapd as the stack usage there is relatively low.
1428 * This should never happen except in the case of a VM regression so
1431 if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
1436 * Is this folio beyond the end of the file?
1438 * The folio index is less than the end_index, adjust the end_pos
1439 * to the highest offset that this folio should represent.
1440 * -----------------------------------------------------
1441 * | file mapping | <EOF> |
1442 * -----------------------------------------------------
1443 * | Page ... | Page N-2 | Page N-1 | Page N | |
1444 * ^--------------------------------^----------|--------
1445 * | desired writeback range | see else |
1446 * ---------------------------------^------------------|
1448 isize = i_size_read(inode);
1449 end_pos = folio_pos(folio) + folio_size(folio);
1450 if (end_pos > isize) {
1452		 * Check whether the page to write out is beyond or straddles i_size.
1454 * -------------------------------------------------------
1455 * | file mapping | <EOF> |
1456 * -------------------------------------------------------
1457 * | Page ... | Page N-2 | Page N-1 | Page N | Beyond |
1458		 * ^--------------------------------^-----------|---------
1459		 * |                                |   Straddles    |
1460		 * ---------------------------------^-----------|--------|
1462 size_t poff = offset_in_folio(folio, isize);
1463 pgoff_t end_index = isize >> PAGE_SHIFT;
1466 * Skip the page if it's fully outside i_size, e.g. due to a
1467 * truncate operation that's in progress. We must redirty the
1468 * page so that reclaim stops reclaiming it. Otherwise
1469		 * iomap_releasepage() is called on it and gets confused.
1471 * Note that the end_index is unsigned long. If the given
1472		 * offset is greater than 16TB on a 32-bit system, then a check
1473		 * for the page being fully outside i_size, such as
1474		 * "if (page->index >= end_index + 1)", would break: "end_index + 1"
1475		 * would overflow and evaluate to 0. Hence this page would be
1476 * redirtied and written out repeatedly, which would result in
1477 * an infinite loop; the user program performing this operation
1478 * would hang. Instead, we can detect this situation by
1479 * checking if the page is totally beyond i_size or if its
1480 * offset is just equal to the EOF.
1482 if (folio->index > end_index ||
1483 (folio->index == end_index && poff == 0))
1487 * The page straddles i_size. It must be zeroed out on each
1488 * and every writepage invocation because it may be mmapped.
1489 * "A file is mapped in multiples of the page size. For a file
1490 * that is not a multiple of the page size, the remaining
1491 * memory is zeroed when mapped, and writes to that region are
1492 * not written out to the file."
1494 folio_zero_segment(folio, poff, folio_size(folio));
1498 return iomap_writepage_map(wpc, wbc, inode, folio, end_pos);
1501 folio_redirty_for_writepage(wbc, folio);
1502 folio_unlock(folio);
1507 iomap_writepage(struct page *page, struct writeback_control *wbc,
1508 struct iomap_writepage_ctx *wpc,
1509 const struct iomap_writeback_ops *ops)
1514 ret = iomap_do_writepage(page, wbc, wpc);
1517 return iomap_submit_ioend(wpc, wpc->ioend, ret);
1519 EXPORT_SYMBOL_GPL(iomap_writepage);
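/*
 * Usage sketch (hypothetical): minimal writeback glue.  ->map_blocks fills
 * wpc->iomap for the block at @offset; the flat mapping here mirrors the
 * fake read-side example above, while a real filesystem would allocate
 * delalloc space and handle unwritten extents.
 */
static int example_map_blocks(struct iomap_writepage_ctx *wpc,
		struct inode *inode, loff_t offset)
{
	wpc->iomap.type = IOMAP_MAPPED;
	wpc->iomap.offset = offset;
	wpc->iomap.length = i_blocksize(inode);
	wpc->iomap.addr = offset;
	wpc->iomap.bdev = inode->i_sb->s_bdev;
	return 0;
}

static const struct iomap_writeback_ops example_writeback_ops = {
	.map_blocks	= example_map_blocks,
};

static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = { };

	return iomap_writepage(page, wbc, &wpc, &example_writeback_ops);
}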
1522 iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
1523 struct iomap_writepage_ctx *wpc,
1524 const struct iomap_writeback_ops *ops)
1529 ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
1532 return iomap_submit_ioend(wpc, wpc->ioend, ret);
1534 EXPORT_SYMBOL_GPL(iomap_writepages);
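/*
 * Usage sketch (hypothetical): ->writepages using the same writeback ops as
 * the single-page variant above; iomap takes care of building, chaining and
 * submitting the ioends.
 */
static int example_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = { };

	return iomap_writepages(mapping, wbc, &wpc, &example_writeback_ops);
}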
1536 static int __init iomap_init(void)
1538 return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
1539 offsetof(struct iomap_ioend, io_inline_bio),
1542 fs_initcall(iomap_init);