// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"
/*
 * Mark a page as having been made dirty and thus needing writeback.
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}
/*
 * Partly or wholly fill a page that's under preparation for writing.
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_read *req;
	size_t p;
	void *data;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

	if (pos >= vnode->vfs_inode.i_size) {
		/* The region lies beyond the EOF: just clear it. */
		p = pos & ~PAGE_MASK;
		ASSERTCMP(p + len, <=, PAGE_SIZE);
		data = kmap(page);
		memset(data + p, 0, len);
		kunmap(page);
		return 0;
	}

	req = kzalloc(struct_size(req, array, 1), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->pages = req->array;
	req->pages[0] = page;
	get_page(page);

	ret = afs_fetch_data(vnode, key, req);
	afs_put_read(req);
	if (ret < 0) {
		if (ret == -ENOENT) {
			_debug("got NOENT from server - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}
/*
 * Prepare to perform part of a write to a page.
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **_page, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned f, from = pos & (PAGE_SIZE - 1);
	unsigned t, to = from + len;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	_enter("{%llx:%llu},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

	/* We want to store information about how much of a page is altered in
	 * page->private.
	 */
	BUILD_BUG_ON(PAGE_SIZE > 32768 && sizeof(page->private) < 8);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (!PageUptodate(page) && len != PAGE_SIZE) {
		ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = afs_page_dirty_from(priv);
		t = afs_page_dirty_to(priv);
		ASSERTCMP(f, <=, t);

		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
					     page->index, priv);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
	}

	*_page = page;
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);
	if (ret < 0)
		goto error;

	ret = lock_page_killable(page);
	if (ret < 0)
		goto error;
	goto try_again;

error:
	put_page(page);
	_leave(" = %d", ret);
	return ret;
}
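/* Illustrative only: how the dirty-range bookkeeping used above is expected
 * to behave, assuming the afs_page_dirty*() helpers in internal.h pack the
 * "from" and "to" byte offsets of the dirty region into the single
 * page->private word (which is why the BUILD_BUG_ON above insists that both
 * offsets fit):
 *
 *	unsigned long priv = afs_page_dirty(0x100, 0xa00);
 *
 *	afs_page_dirty_from(priv);	// => 0x100 (first dirty byte)
 *	afs_page_dirty_to(priv);	// => 0xa00 (byte after the last dirty one)
 */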
/*
 * Finalise part of a write to a page.
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned int f, from = pos & (PAGE_SIZE - 1);
	unsigned int t, to = from + copied;
	loff_t i_size, maybe_i_size;
	int ret = 0;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	if (copied == 0)
		goto out;

	maybe_i_size = pos + copied;

	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		write_seqlock(&vnode->cb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		write_sequnlock(&vnode->cb_lock);
	}

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* Try and load any missing data from the server.  The
			 * unmarshalling routine will take care of clearing any
			 * bits that are beyond the EOF.
			 */
			ret = afs_fill_page(vnode, key, pos + copied,
					    len - copied, page);
			if (ret < 0)
				goto out;
		}
		SetPageUptodate(page);
	}

	if (PagePrivate(page)) {
		priv = page_private(page);
		f = afs_page_dirty_from(priv);
		t = afs_page_dirty_to(priv);
		if (from < f)
			f = from;
		if (to > t)
			t = to;
		priv = afs_page_dirty(f, t);
		set_page_private(page, priv);
		trace_afs_page_dirty(vnode, tracepoint_string("dirty+"),
				     page->index, priv);
	} else {
		priv = afs_page_dirty(from, to);
		attach_page_private(page, (void *)priv);
		trace_afs_page_dirty(vnode, tracepoint_string("dirty"),
				     page->index, priv);
	}

	set_page_dirty(page);
	ret = copied;

out:
	unlock_page(page);
	put_page(page);
	return ret;
}
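/* A worked example of the merge in afs_write_end() (illustrative only): if
 * the page already carries dirty range [f, t) = [0x200, 0x600) and the new
 * copy covers [from, to) = [0x500, 0x900), the recorded range widens to the
 * union [0x200, 0x900).  Writes that would leave a hole never get here on a
 * non-new file: afs_write_begin() flushes conflicting ranges first.
 */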
/*
 * Kill all the pages in the given range.
 */
static void afs_kill_pages(struct address_space *mapping,
			   pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			ClearPageUptodate(page);
			SetPageError(page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
			lock_page(page);
			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}
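/* A note on the loop shape used above and in the two helpers that follow: a
 * pagevec carries at most PAGEVEC_SIZE page pointers, so the range is
 * processed in batches, advancing "first" past each page seen until the
 * whole of [first, last] has been covered.  The idiom, in sketch form:
 *
 *	do {
 *		count = min_t(pgoff_t, last - first + 1, PAGEVEC_SIZE);
 *		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
 *		// ... process pv.pages[0 .. pv.nr - 1], advancing first ...
 *		__pagevec_release(&pv);
 *	} while (first <= last);
 */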
/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("redirty %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}
/*
 * Completion of a write to the server.
 */
static void afs_pages_written_back(struct afs_vnode *vnode,
				   pgoff_t first, pgoff_t last)
{
	struct pagevec pv;
	unsigned long priv;
	unsigned count, loop;

	_enter("{%llx:%llu},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("done %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			priv = (unsigned long)detach_page_private(pv.pages[loop]);
			trace_afs_page_dirty(vnode, tracepoint_string("clear"),
					     pv.pages[loop]->index, priv);
			end_page_writeback(pv.pages[loop]);
		}
		first += count;

		__pagevec_release(&pv);
	} while (first <= last);

	afs_prune_wb_keys(vnode);
	_leave("");
}
/*
 * Find a key to use for the writeback.  We cache the keys used to author
 * writes on the vnode.  *_wbk will contain the last writeback key used, or
 * NULL, and we need to start from there if it's set.
 */
static int afs_get_writeback_key(struct afs_vnode *vnode,
				 struct afs_wb_key **_wbk)
{
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	spin_lock(&vnode->wb_lock);
	if (*_wbk)
		p = (*_wbk)->vnode_link.next;
	else
		p = vnode->wb_keys.next;

	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0) {
			refcount_inc(&wbk->usage);
			_debug("USE WB KEY %u", key_serial(wbk->key));
			break;
		}

		wbk = NULL;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	if (*_wbk)
		afs_put_wb_key(*_wbk);
	*_wbk = wbk;
	return wbk ? 0 : ret;
}
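/* Typical call pattern (a sketch; afs_store_data() below is the real user):
 * start with a NULL key to get the first usable key on vnode->wb_keys, then
 * feed the previous key back in to resume the search after a permission
 * failure:
 *
 *	struct afs_wb_key *wbk = NULL;
 *
 *	ret = afs_get_writeback_key(vnode, &wbk);	// first valid key
 *	...
 *	ret = afs_get_writeback_key(vnode, &wbk);	// next valid key
 *	...
 *	afs_put_wb_key(wbk);				// drop the final ref
 */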
static void afs_store_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	op->ctime = op->file[0].scb.status.mtime_client;
	afs_vnode_commit_status(op, &op->file[0]);
	if (op->error == 0) {
		if (!op->store.laundering)
			afs_pages_written_back(vnode, op->store.first, op->store.last);
		afs_stat_v(vnode, n_stores);
		atomic_long_add((op->store.last * PAGE_SIZE + op->store.last_to) -
				(op->store.first * PAGE_SIZE + op->store.first_offset),
				&afs_v2net(vnode)->n_store_bytes);
	}
}
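/* The byte count added to n_store_bytes above is just the extent of the
 * store.  For example, with 4KiB pages, first = 3, first_offset = 0x100,
 * last = 5 and last_to = 0x800 account
 * (5 * 4096 + 0x800) - (3 * 4096 + 0x100) = 0x2700 bytes sent to the server.
 */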
static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};
/*
 * Write a range of pages back to the server.
 */
static int afs_store_data(struct address_space *mapping,
			  pgoff_t first, pgoff_t last,
			  unsigned offset, unsigned to, bool laundering)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_operation *op;
	struct afs_wb_key *wbk = NULL;
	int ret;

	_enter("%s{%llx:%llu.%u},%lx,%lx,%x,%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       first, last, offset, to);

	ret = afs_get_writeback_key(vnode, &wbk);
	if (ret) {
		_leave(" = %d [no keys]", ret);
		return ret;
	}

	op = afs_alloc_operation(wbk->key, vnode->volume);
	if (IS_ERR(op)) {
		afs_put_wb_key(wbk);
		return -ENOMEM;
	}

	afs_op_set_vnode(op, 0, vnode);
	op->file[0].dv_delta = 1;
	op->store.mapping = mapping;
	op->store.first = first;
	op->store.last = last;
	op->store.first_offset = offset;
	op->store.last_to = to;
	op->store.laundering = laundering;
	op->mtime = vnode->vfs_inode.i_mtime;
	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_store_data_operation;

try_next_key:
	afs_begin_vnode_operation(op);
	afs_wait_for_operation(op);

	switch (op->error) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");

		ret = afs_get_writeback_key(vnode, &wbk);
		if (ret == 0) {
			key_put(op->key);
			op->key = key_get(wbk->key);
			goto try_next_key;
		}
		break;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", op->error);
	return afs_put_operation(op);
}
/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static int afs_write_back_from_locked_page(struct address_space *mapping,
					   struct writeback_control *wbc,
					   struct page *primary_page,
					   pgoff_t final_page)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct page *pages[8], *page;
	unsigned long count, priv;
	unsigned n, offset, to, f, t;
	pgoff_t start, first, last;
	loff_t i_size, end;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (test_set_page_writeback(primary_page))
		BUG();

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	start = primary_page->index;
	priv = page_private(primary_page);
	offset = afs_page_dirty_from(priv);
	to = afs_page_dirty_to(priv);
	trace_afs_page_dirty(vnode, tracepoint_string("store"),
			     primary_page->index, priv);

	WARN_ON(offset == to);
	if (offset == to)
		trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
				     primary_page->index, priv);

	if (start >= final_page ||
	    (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
		goto no_more;

	start++;
	do {
		_debug("more %lx [%lx]", start, count);
		n = final_page - start + 1;
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
		n = find_get_pages_contig(mapping, start, n, pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			page = pages[loop];
			if (to != PAGE_SIZE &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
				break;
			if (page->index > final_page)
				break;
			if (!trylock_page(page))
				break;
			if (!PageDirty(page) || PageWriteback(page)) {
				unlock_page(page);
				break;
			}

			priv = page_private(page);
			f = afs_page_dirty_from(priv);
			t = afs_page_dirty_to(priv);
			if (f != 0 &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
				unlock_page(page);
				break;
			}
			to = t;

			trace_afs_page_dirty(vnode, tracepoint_string("store+"),
					     page->index, priv);

			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= final_page && count < 65536);

no_more:
	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(primary_page);

	first = primary_page->index;
	last = first + count - 1;

	end = (loff_t)last * PAGE_SIZE + to;
	i_size = i_size_read(&vnode->vfs_inode);

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);
	if (end > i_size)
		to = i_size & ~PAGE_MASK;

	ret = afs_store_data(mapping, first, last, offset, to, false);
	switch (ret) {
	case 0:
		ret = count;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		fallthrough;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, first, last);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}
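/* Note that on success the function above returns the number of pages put
 * under writeback rather than zero: a return of 5 from one locked page means
 * four further dirty pages were swept up into the same StoreData RPC.  The
 * callers below rely on this to charge the whole batch against
 * wbc->nr_to_write.
 */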
/*
 * Write a page back to the server.
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	_enter("{%lx},", page->index);

	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      wbc->range_end >> PAGE_SHIFT);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}
/*
 * Write a region of pages back to the server.
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		n = find_get_pages_range_tag(mapping, &index, end,
					     PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		/*
		 * at this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		ret = lock_page_killable(page);
		if (ret < 0) {
			put_page(page);
			_leave(" = %d", ret);
			return ret;
		}

		if (page->mapping != mapping || !PageDirty(page)) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (PageWriteback(page)) {
			unlock_page(page);
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

		if (!clear_page_dirty_for_io(page))
			BUG();
		ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}
/*
 * Write some of the pending data back to the server.
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	pgoff_t start, end, next;
	int ret;

	_enter("");

	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL)
		down_read(&vnode->validate_lock);
	else if (!down_read_trylock(&vnode->validate_lock))
		return 0;

	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	up_read(&vnode->validate_lock);
	_leave(" = %d", ret);
	return ret;
}
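/* How the range_cyclic case above wraps (illustrative only): with
 * writeback_index = N, the first pass covers [N, ULONG_MAX] and, if quota
 * remains and no error occurred, a second pass covers [0, N), so every dirty
 * page gets a turn no matter where the previous cycle stopped:
 *
 *	ret = afs_writepages_region(mapping, wbc, N, -1, &next);
 *	if (N > 0 && wbc->nr_to_write > 0 && ret == 0)
 *		ret = afs_writepages_region(mapping, wbc, 0, N, &next);
 */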
/*
 * Write to an AFS file.
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%llx:%llu},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}
/*
 * Flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	return file_write_and_wait_range(file, start, end);
}
/*
 * Notification that a previously read-only page is about to become writable.
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;

	_enter("{{%llx:%llu}},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	fscache_wait_on_page_write(vnode->cache, vmf->page);
#endif

	if (PageWriteback(vmf->page) &&
	    wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
		return VM_FAULT_RETRY;

	if (lock_page_killable(vmf->page) < 0)
		return VM_FAULT_RETRY;

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	wait_on_page_writeback(vmf->page);

	priv = afs_page_dirty(0, PAGE_SIZE);
	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
			     vmf->page->index, priv);
	if (PagePrivate(vmf->page))
		set_page_private(vmf->page, priv);
	else
		attach_page_private(vmf->page, (void *)priv);
	file_update_time(file);

	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;
}
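/* Illustrative only: after a successful afs_page_mkwrite(), page->private
 * records the whole page as dirty, e.g. with 4KiB pages:
 *
 *	priv = afs_page_dirty(0, PAGE_SIZE);
 *	afs_page_dirty_from(priv);	// => 0
 *	afs_page_dirty_to(priv);	// => 0x1000
 *
 * since a writable mmap leaves no record of which bytes userspace actually
 * touches.
 */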
/*
 * Prune the keys cached for writeback.  Unused keys are discarded once the
 * mapping has nothing dirty or under writeback; the wb_lock is taken here,
 * not by the caller.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}
/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		f = 0;
		t = PAGE_SIZE;
		if (PagePrivate(page)) {
			f = afs_page_dirty_from(priv);
			t = afs_page_dirty_to(priv);
		}

		trace_afs_page_dirty(vnode, tracepoint_string("launder"),
				     page->index, priv);
		/* Note the argument order: f is the start offset of the dirty
		 * region and t the end offset, matching afs_store_data()'s
		 * (offset, to) parameters.
		 */
		ret = afs_store_data(mapping, page->index, page->index, f, t, true);
	}

	priv = (unsigned long)detach_page_private(page);
	trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
			     page->index, priv);

#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page)) {
		fscache_wait_on_page_write(vnode->cache, page);
		fscache_uncache_page(vnode->cache, page);
	}
#endif
	return ret;
}