1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* handling of writes to regular files and writing back to the server
3  *
4  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
5  * Written by David Howells (dhowells@redhat.com)
6  */
7
8 #include <linux/backing-dev.h>
9 #include <linux/slab.h>
10 #include <linux/fs.h>
11 #include <linux/pagemap.h>
12 #include <linux/writeback.h>
13 #include <linux/pagevec.h>
14 #include "internal.h"
15
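/*
 * A note on the dirty-region bookkeeping used below: for a partially written
 * page, page->private records the byte range within the page that still needs
 * storing to the server.  The "from" offset sits in the low-order bits (masked
 * with AFS_PRIV_MAX) and the "to" offset sits above AFS_PRIV_SHIFT (both
 * presumably defined in internal.h).  afs_write_begin() widens this range as
 * adjacent writes are merged, and afs_write_back_from_locked_page() reads it
 * back to decide how much of each page to send to the server.
 */
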
16 /*
17  * mark a page as having been made dirty and thus needing writeback
18  */
19 int afs_set_page_dirty(struct page *page)
20 {
21         _enter("");
22         return __set_page_dirty_nobuffers(page);
23 }
24
25 /*
26  * partly or wholly fill a page that's under preparation for writing
27  */
28 static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
29                          loff_t pos, unsigned int len, struct page *page)
30 {
31         struct afs_read *req;
32         size_t p;
33         void *data;
34         int ret;
35
36         _enter(",,%llu", (unsigned long long)pos);
37
38         if (pos >= vnode->vfs_inode.i_size) {
39                 p = pos & ~PAGE_MASK;
40                 ASSERTCMP(p + len, <=, PAGE_SIZE);
41                 data = kmap(page);
42                 memset(data + p, 0, len);
43                 kunmap(page);
44                 return 0;
45         }
46
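        /* The write falls within the current file content, so fetch the
         * existing data from the server to avoid losing the parts of the page
         * that this write does not overwrite.
         */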
47         req = kzalloc(struct_size(req, array, 1), GFP_KERNEL);
48         if (!req)
49                 return -ENOMEM;
50
51         refcount_set(&req->usage, 1);
52         req->pos = pos;
53         req->len = len;
54         req->nr_pages = 1;
55         req->pages = req->array;
56         req->pages[0] = page;
57         get_page(page);
58
59         ret = afs_fetch_data(vnode, key, req);
60         afs_put_read(req);
61         if (ret < 0) {
62                 if (ret == -ENOENT) {
63                         _debug("got NOENT from server"
64                                " - marking file deleted and stale");
65                         set_bit(AFS_VNODE_DELETED, &vnode->flags);
66                         ret = -ESTALE;
67                 }
68         }
69
70         _leave(" = %d", ret);
71         return ret;
72 }
73
74 /*
75  * prepare to perform part of a write to a page
76  */
77 int afs_write_begin(struct file *file, struct address_space *mapping,
78                     loff_t pos, unsigned len, unsigned flags,
79                     struct page **pagep, void **fsdata)
80 {
81         struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
82         struct page *page;
83         struct key *key = afs_file_key(file);
84         unsigned long priv;
85         unsigned f, from = pos & (PAGE_SIZE - 1);
86         unsigned t, to = from + len;
87         pgoff_t index = pos >> PAGE_SHIFT;
88         int ret;
89
90         _enter("{%llx:%llu},{%lx},%u,%u",
91                vnode->fid.vid, vnode->fid.vnode, index, from, to);
92
93         /* We want to store information about how much of a page is altered in
94          * page->private.
95          */
96         BUILD_BUG_ON(PAGE_SIZE > 32768 && sizeof(page->private) < 8);
97
98         page = grab_cache_page_write_begin(mapping, index, flags);
99         if (!page)
100                 return -ENOMEM;
101
102         if (!PageUptodate(page) && len != PAGE_SIZE) {
103                 ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
104                 if (ret < 0) {
105                         unlock_page(page);
106                         put_page(page);
107                         _leave(" = %d [prep]", ret);
108                         return ret;
109                 }
110                 SetPageUptodate(page);
111         }
112
113         /* page won't leak in error case: it eventually gets cleaned off LRU */
114         *pagep = page;
115
116 try_again:
117         /* See if this page is already partially written in a way that we can
118          * merge the new write with.
119          */
120         t = f = 0;
121         if (PagePrivate(page)) {
122                 priv = page_private(page);
123                 f = priv & AFS_PRIV_MAX;
124                 t = priv >> AFS_PRIV_SHIFT;
125                 ASSERTCMP(f, <=, t);
126         }
127
128         if (f != t) {
129                 if (PageWriteback(page)) {
130                         trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
131                                              page->index, priv);
132                         goto flush_conflicting_write;
133                 }
134                 /* If the file is being filled locally, allow inter-write
135                  * spaces to be merged into writes.  If it's not, only write
136                  * back what the user gives us.
137                  */
138                 if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
139                     (to < f || from > t))
140                         goto flush_conflicting_write;
141                 if (from < f)
142                         f = from;
143                 if (to > t)
144                         t = to;
145         } else {
146                 f = from;
147                 t = to;
148         }
149
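        /* Record the merged dirty range in page->private: "from" in the low
         * bits, "to" above AFS_PRIV_SHIFT.
         */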
150         priv = (unsigned long)t << AFS_PRIV_SHIFT;
151         priv |= f;
152         trace_afs_page_dirty(vnode, tracepoint_string("begin"),
153                              page->index, priv);
154         SetPagePrivate(page);
155         set_page_private(page, priv);
156         _leave(" = 0");
157         return 0;
158
159         /* The previous write and this write aren't adjacent or overlapping, so
160          * flush the page out.
161          */
162 flush_conflicting_write:
163         _debug("flush conflict");
164         ret = write_one_page(page);
165         if (ret < 0) {
166                 _leave(" = %d", ret);
167                 return ret;
168         }
169
170         ret = lock_page_killable(page);
171         if (ret < 0) {
172                 _leave(" = %d", ret);
173                 return ret;
174         }
175         goto try_again;
176 }
177
178 /*
179  * finalise part of a write to a page
180  */
181 int afs_write_end(struct file *file, struct address_space *mapping,
182                   loff_t pos, unsigned len, unsigned copied,
183                   struct page *page, void *fsdata)
184 {
185         struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
186         struct key *key = afs_file_key(file);
187         loff_t i_size, maybe_i_size;
188         int ret;
189
190         _enter("{%llx:%llu},{%lx}",
191                vnode->fid.vid, vnode->fid.vnode, page->index);
192
193         maybe_i_size = pos + copied;
194
195         i_size = i_size_read(&vnode->vfs_inode);
196         if (maybe_i_size > i_size) {
197                 write_seqlock(&vnode->cb_lock);
198                 i_size = i_size_read(&vnode->vfs_inode);
199                 if (maybe_i_size > i_size)
200                         i_size_write(&vnode->vfs_inode, maybe_i_size);
201                 write_sequnlock(&vnode->cb_lock);
202         }
203
204         if (!PageUptodate(page)) {
205                 if (copied < len) {
206                         /* Try and load any missing data from the server.  The
207                          * unmarshalling routine will take care of clearing any
208                          * bits that are beyond the EOF.
209                          */
210                         ret = afs_fill_page(vnode, key, pos + copied,
211                                             len - copied, page);
212                         if (ret < 0)
213                                 goto out;
214                 }
215                 SetPageUptodate(page);
216         }
217
218         set_page_dirty(page);
219         if (PageDirty(page))
220                 _debug("dirtied");
221         ret = copied;
222
223 out:
224         unlock_page(page);
225         put_page(page);
226         return ret;
227 }
228
229 /*
230  * kill all the pages in the given range
231  */
232 static void afs_kill_pages(struct address_space *mapping,
233                            pgoff_t first, pgoff_t last)
234 {
235         struct afs_vnode *vnode = AFS_FS_I(mapping->host);
236         struct pagevec pv;
237         unsigned count, loop;
238
239         _enter("{%llx:%llu},%lx-%lx",
240                vnode->fid.vid, vnode->fid.vnode, first, last);
241
242         pagevec_init(&pv);
243
244         do {
245                 _debug("kill %lx-%lx", first, last);
246
247                 count = last - first + 1;
248                 if (count > PAGEVEC_SIZE)
249                         count = PAGEVEC_SIZE;
250                 pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
251                 ASSERTCMP(pv.nr, ==, count);
252
253                 for (loop = 0; loop < count; loop++) {
254                         struct page *page = pv.pages[loop];
255                         ClearPageUptodate(page);
256                         SetPageError(page);
257                         end_page_writeback(page);
258                         if (page->index >= first)
259                                 first = page->index + 1;
260                         lock_page(page);
261                         generic_error_remove_page(mapping, page);
262                         unlock_page(page);
263                 }
264
265                 __pagevec_release(&pv);
266         } while (first <= last);
267
268         _leave("");
269 }
270
271 /*
272  * Redirty all the pages in a given range.
273  */
274 static void afs_redirty_pages(struct writeback_control *wbc,
275                               struct address_space *mapping,
276                               pgoff_t first, pgoff_t last)
277 {
278         struct afs_vnode *vnode = AFS_FS_I(mapping->host);
279         struct pagevec pv;
280         unsigned count, loop;
281
282         _enter("{%llx:%llu},%lx-%lx",
283                vnode->fid.vid, vnode->fid.vnode, first, last);
284
285         pagevec_init(&pv);
286
287         do {
288                 _debug("redirty %lx-%lx", first, last);
289
290                 count = last - first + 1;
291                 if (count > PAGEVEC_SIZE)
292                         count = PAGEVEC_SIZE;
293                 pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
294                 ASSERTCMP(pv.nr, ==, count);
295
296                 for (loop = 0; loop < count; loop++) {
297                         struct page *page = pv.pages[loop];
298
299                         redirty_page_for_writepage(wbc, page);
300                         end_page_writeback(page);
301                         if (page->index >= first)
302                                 first = page->index + 1;
303                 }
304
305                 __pagevec_release(&pv);
306         } while (first <= last);
307
308         _leave("");
309 }
310
311 /*
312  * completion of write to server
313  */
314 static void afs_pages_written_back(struct afs_vnode *vnode,
315                                    pgoff_t first, pgoff_t last)
316 {
317         struct pagevec pv;
318         unsigned long priv;
319         unsigned count, loop;
320
321         _enter("{%llx:%llu},{%lx-%lx}",
322                vnode->fid.vid, vnode->fid.vnode, first, last);
323
324         pagevec_init(&pv);
325
326         do {
327                 _debug("done %lx-%lx", first, last);
328
329                 count = last - first + 1;
330                 if (count > PAGEVEC_SIZE)
331                         count = PAGEVEC_SIZE;
332                 pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
333                                               first, count, pv.pages);
334                 ASSERTCMP(pv.nr, ==, count);
335
336                 for (loop = 0; loop < count; loop++) {
337                         priv = page_private(pv.pages[loop]);
338                         trace_afs_page_dirty(vnode, tracepoint_string("clear"),
339                                              pv.pages[loop]->index, priv);
340                         set_page_private(pv.pages[loop], 0);
341                         end_page_writeback(pv.pages[loop]);
342                 }
343                 first += count;
344                 __pagevec_release(&pv);
345         } while (first <= last);
346
347         afs_prune_wb_keys(vnode);
348         _leave("");
349 }
350
351 /*
352  * Find a key to use for the writeback.  We cached the keys used to author the
353  * writes on the vnode.  *_wbk will contain the last writeback key used or NULL
354  * and we need to start from there if it's set.
355  */
356 static int afs_get_writeback_key(struct afs_vnode *vnode,
357                                  struct afs_wb_key **_wbk)
358 {
359         struct afs_wb_key *wbk = NULL;
360         struct list_head *p;
361         int ret = -ENOKEY, ret2;
362
363         spin_lock(&vnode->wb_lock);
364         if (*_wbk)
365                 p = (*_wbk)->vnode_link.next;
366         else
367                 p = vnode->wb_keys.next;
368
369         while (p != &vnode->wb_keys) {
370                 wbk = list_entry(p, struct afs_wb_key, vnode_link);
371                 _debug("wbk %u", key_serial(wbk->key));
372                 ret2 = key_validate(wbk->key);
373                 if (ret2 == 0) {
374                         refcount_inc(&wbk->usage);
375                         _debug("USE WB KEY %u", key_serial(wbk->key));
376                         break;
377                 }
378
379                 wbk = NULL;
380                 if (ret == -ENOKEY)
381                         ret = ret2;
382                 p = p->next;
383         }
384
385         spin_unlock(&vnode->wb_lock);
386         if (*_wbk)
387                 afs_put_wb_key(*_wbk);
388         *_wbk = wbk;
389         return wbk ? 0 : ret;
390 }
391
392 static void afs_store_data_success(struct afs_operation *op)
393 {
394         struct afs_vnode *vnode = op->file[0].vnode;
395
396         op->ctime = op->file[0].scb.status.mtime_client;
397         afs_vnode_commit_status(op, &op->file[0]);
398         if (op->error == 0) {
399                 afs_pages_written_back(vnode, op->store.first, op->store.last);
400                 afs_stat_v(vnode, n_stores);
401                 atomic_long_add((op->store.last * PAGE_SIZE + op->store.last_to) -
402                                 (op->store.first * PAGE_SIZE + op->store.first_offset),
403                                 &afs_v2net(vnode)->n_store_bytes);
404         }
405 }
406
407 static const struct afs_operation_ops afs_store_data_operation = {
408         .issue_afs_rpc  = afs_fs_store_data,
409         .issue_yfs_rpc  = yfs_fs_store_data,
410         .success        = afs_store_data_success,
411 };
412
413 /*
414  * write to a file
415  */
416 static int afs_store_data(struct address_space *mapping,
417                           pgoff_t first, pgoff_t last,
418                           unsigned offset, unsigned to)
419 {
420         struct afs_vnode *vnode = AFS_FS_I(mapping->host);
421         struct afs_operation *op;
422         struct afs_wb_key *wbk = NULL;
423         int ret;
424
425         _enter("%s{%llx:%llu.%u},%lx,%lx,%x,%x",
426                vnode->volume->name,
427                vnode->fid.vid,
428                vnode->fid.vnode,
429                vnode->fid.unique,
430                first, last, offset, to);
431
432         ret = afs_get_writeback_key(vnode, &wbk);
433         if (ret) {
434                 _leave(" = %d [no keys]", ret);
435                 return ret;
436         }
437
438         op = afs_alloc_operation(wbk->key, vnode->volume);
439         if (IS_ERR(op)) {
440                 afs_put_wb_key(wbk);
441                 return -ENOMEM;
442         }
443
444         afs_op_set_vnode(op, 0, vnode);
445         op->file[0].dv_delta = 1;
446         op->store.mapping = mapping;
447         op->store.first = first;
448         op->store.last = last;
449         op->store.first_offset = offset;
450         op->store.last_to = to;
451         op->mtime = vnode->vfs_inode.i_mtime;
452         op->ops = &afs_store_data_operation;
453
454 try_next_key:
455         afs_begin_vnode_operation(op);
456         afs_wait_for_operation(op);
457
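        /* If the server rejected the key we used, rotate to the next cached
         * writeback key and reissue the RPC; any other result is final.
         */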
458         switch (op->error) {
459         case -EACCES:
460         case -EPERM:
461         case -ENOKEY:
462         case -EKEYEXPIRED:
463         case -EKEYREJECTED:
464         case -EKEYREVOKED:
465                 _debug("next");
466
467                 ret = afs_get_writeback_key(vnode, &wbk);
468                 if (ret == 0) {
469                         key_put(op->key);
470                         op->key = key_get(wbk->key);
471                         goto try_next_key;
472                 }
473                 break;
474         }
475
476         afs_put_wb_key(wbk);
477         _leave(" = %d", op->error);
478         return afs_put_operation(op);
479 }
480
481 /*
482  * Synchronously write back the locked page and any subsequent non-locked dirty
483  * pages.
484  */
485 static int afs_write_back_from_locked_page(struct address_space *mapping,
486                                            struct writeback_control *wbc,
487                                            struct page *primary_page,
488                                            pgoff_t final_page)
489 {
490         struct afs_vnode *vnode = AFS_FS_I(mapping->host);
491         struct page *pages[8], *page;
492         unsigned long count, priv;
493         unsigned n, offset, to, f, t;
494         pgoff_t start, first, last;
495         loff_t i_size, end;
496         int loop, ret;
497
498         _enter(",%lx", primary_page->index);
499
500         count = 1;
501         if (test_set_page_writeback(primary_page))
502                 BUG();
503
504         /* Find all consecutive lockable dirty pages that have contiguous
505          * written regions, stopping when we find a page that is not
506          * immediately lockable, is not dirty or is missing, or we reach the
507          * end of the range.
508          */
509         start = primary_page->index;
510         priv = page_private(primary_page);
511         offset = priv & AFS_PRIV_MAX;
512         to = priv >> AFS_PRIV_SHIFT;
513         trace_afs_page_dirty(vnode, tracepoint_string("store"),
514                              primary_page->index, priv);
515
516         WARN_ON(offset == to);
517         if (offset == to)
518                 trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
519                                      primary_page->index, priv);
520
521         if (start >= final_page ||
522             (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
523                 goto no_more;
524
525         start++;
526         do {
527                 _debug("more %lx [%lx]", start, count);
528                 n = final_page - start + 1;
529                 if (n > ARRAY_SIZE(pages))
530                         n = ARRAY_SIZE(pages);
531                 n = find_get_pages_contig(mapping, start, ARRAY_SIZE(pages), pages);
532                 _debug("fgpc %u", n);
533                 if (n == 0)
534                         goto no_more;
535                 if (pages[0]->index != start) {
536                         do {
537                                 put_page(pages[--n]);
538                         } while (n > 0);
539                         goto no_more;
540                 }
541
542                 for (loop = 0; loop < n; loop++) {
543                         page = pages[loop];
544                         if (to != PAGE_SIZE &&
545                             !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
546                                 break;
547                         if (page->index > final_page)
548                                 break;
549                         if (!trylock_page(page))
550                                 break;
551                         if (!PageDirty(page) || PageWriteback(page)) {
552                                 unlock_page(page);
553                                 break;
554                         }
555
556                         priv = page_private(page);
557                         f = priv & AFS_PRIV_MAX;
558                         t = priv >> AFS_PRIV_SHIFT;
559                         if (f != 0 &&
560                             !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
561                                 unlock_page(page);
562                                 break;
563                         }
564                         to = t;
565
566                         trace_afs_page_dirty(vnode, tracepoint_string("store+"),
567                                              page->index, priv);
568
569                         if (!clear_page_dirty_for_io(page))
570                                 BUG();
571                         if (test_set_page_writeback(page))
572                                 BUG();
573                         unlock_page(page);
574                         put_page(page);
575                 }
576                 count += loop;
577                 if (loop < n) {
578                         for (; loop < n; loop++)
579                                 put_page(pages[loop]);
580                         goto no_more;
581                 }
582
583                 start += loop;
584         } while (start <= final_page && count < 65536);
585
586 no_more:
587         /* We now have a contiguous set of dirty pages, each with writeback
588          * set; the first page is still locked at this point, but all the rest
589          * have been unlocked.
590          */
591         unlock_page(primary_page);
592
593         first = primary_page->index;
594         last = first + count - 1;
595
596         end = (loff_t)last * PAGE_SIZE + to;
597         i_size = i_size_read(&vnode->vfs_inode);
598
599         _debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);
600         if (end > i_size)
601                 to = i_size & ~PAGE_MASK;
602
603         ret = afs_store_data(mapping, first, last, offset, to);
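        /* Decide what to do with the pages on failure: permission, key and
         * quota errors leave the pages dirty for a later retry, while hard
         * errors discard the pages entirely.
         */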
604         switch (ret) {
605         case 0:
606                 ret = count;
607                 break;
608
609         default:
610                 pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
611                 /* Fall through */
612         case -EACCES:
613         case -EPERM:
614         case -ENOKEY:
615         case -EKEYEXPIRED:
616         case -EKEYREJECTED:
617         case -EKEYREVOKED:
618                 afs_redirty_pages(wbc, mapping, first, last);
619                 mapping_set_error(mapping, ret);
620                 break;
621
622         case -EDQUOT:
623         case -ENOSPC:
624                 afs_redirty_pages(wbc, mapping, first, last);
625                 mapping_set_error(mapping, -ENOSPC);
626                 break;
627
628         case -EROFS:
629         case -EIO:
630         case -EREMOTEIO:
631         case -EFBIG:
632         case -ENOENT:
633         case -ENOMEDIUM:
634         case -ENXIO:
635                 trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
636                 afs_kill_pages(mapping, first, last);
637                 mapping_set_error(mapping, ret);
638                 break;
639         }
640
641         _leave(" = %d", ret);
642         return ret;
643 }
644
645 /*
646  * write a page back to the server
647  * - the caller locked the page for us
648  */
649 int afs_writepage(struct page *page, struct writeback_control *wbc)
650 {
651         int ret;
652
653         _enter("{%lx},", page->index);
654
655         ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
656                                               wbc->range_end >> PAGE_SHIFT);
657         if (ret < 0) {
658                 _leave(" = %d", ret);
659                 return 0;
660         }
661
662         wbc->nr_to_write -= ret;
663
664         _leave(" = 0");
665         return 0;
666 }
667
668 /*
669  * write a region of pages back to the server
670  */
671 static int afs_writepages_region(struct address_space *mapping,
672                                  struct writeback_control *wbc,
673                                  pgoff_t index, pgoff_t end, pgoff_t *_next)
674 {
675         struct page *page;
676         int ret, n;
677
678         _enter(",,%lx,%lx,", index, end);
679
680         do {
681                 n = find_get_pages_range_tag(mapping, &index, end,
682                                         PAGECACHE_TAG_DIRTY, 1, &page);
683                 if (!n)
684                         break;
685
686                 _debug("wback %lx", page->index);
687
688                 /*
689                  * at this point we hold neither the i_pages lock nor the
690                  * page lock: the page may be truncated or invalidated
691                  * (changing page->mapping to NULL), or even swizzled
692                  * back from swapper_space to tmpfs file mapping
693                  */
694                 ret = lock_page_killable(page);
695                 if (ret < 0) {
696                         put_page(page);
697                         _leave(" = %d", ret);
698                         return ret;
699                 }
700
701                 if (page->mapping != mapping || !PageDirty(page)) {
702                         unlock_page(page);
703                         put_page(page);
704                         continue;
705                 }
706
707                 if (PageWriteback(page)) {
708                         unlock_page(page);
709                         if (wbc->sync_mode != WB_SYNC_NONE)
710                                 wait_on_page_writeback(page);
711                         put_page(page);
712                         continue;
713                 }
714
715                 if (!clear_page_dirty_for_io(page))
716                         BUG();
717                 ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
718                 put_page(page);
719                 if (ret < 0) {
720                         _leave(" = %d", ret);
721                         return ret;
722                 }
723
724                 wbc->nr_to_write -= ret;
725
726                 cond_resched();
727         } while (index < end && wbc->nr_to_write > 0);
728
729         *_next = index;
730         _leave(" = 0 [%lx]", *_next);
731         return 0;
732 }
733
734 /*
735  * write some of the pending data back to the server
736  */
737 int afs_writepages(struct address_space *mapping,
738                    struct writeback_control *wbc)
739 {
740         pgoff_t start, end, next;
741         int ret;
742
743         _enter("");
744
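        /* For cyclic writeback, resume from where the last pass stopped and,
         * if there is still quota left, wrap around to cover the start of the
         * file as well.
         */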
745         if (wbc->range_cyclic) {
746                 start = mapping->writeback_index;
747                 end = -1;
748                 ret = afs_writepages_region(mapping, wbc, start, end, &next);
749                 if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
750                         ret = afs_writepages_region(mapping, wbc, 0, start,
751                                                     &next);
752                 mapping->writeback_index = next;
753         } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
754                 end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
755                 ret = afs_writepages_region(mapping, wbc, 0, end, &next);
756                 if (wbc->nr_to_write > 0)
757                         mapping->writeback_index = next;
758         } else {
759                 start = wbc->range_start >> PAGE_SHIFT;
760                 end = wbc->range_end >> PAGE_SHIFT;
761                 ret = afs_writepages_region(mapping, wbc, start, end, &next);
762         }
763
764         _leave(" = %d", ret);
765         return ret;
766 }
767
768 /*
769  * write to an AFS file
770  */
771 ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
772 {
773         struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
774         ssize_t result;
775         size_t count = iov_iter_count(from);
776
777         _enter("{%llx:%llu},{%zu},",
778                vnode->fid.vid, vnode->fid.vnode, count);
779
780         if (IS_SWAPFILE(&vnode->vfs_inode)) {
781                 printk(KERN_INFO
782                        "AFS: Attempt to write to active swap file!\n");
783                 return -EBUSY;
784         }
785
786         if (!count)
787                 return 0;
788
789         result = generic_file_write_iter(iocb, from);
790
791         _leave(" = %zd", result);
792         return result;
793 }
794
795 /*
796  * flush any dirty pages for this process, and check for write errors.
797  * - the return status from this call provides a reliable indication of
798  *   whether any write errors occurred for this process.
799  */
800 int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
801 {
802         struct inode *inode = file_inode(file);
803         struct afs_vnode *vnode = AFS_FS_I(inode);
804
805         _enter("{%llx:%llu},{n=%pD},%d",
806                vnode->fid.vid, vnode->fid.vnode, file,
807                datasync);
808
809         return file_write_and_wait_range(file, start, end);
810 }
811
812 /*
813  * notification that a previously read-only page is about to become writable
814  * - if it returns an error, the caller will deliver a bus error signal
815  */
816 vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
817 {
818         struct file *file = vmf->vma->vm_file;
819         struct inode *inode = file_inode(file);
820         struct afs_vnode *vnode = AFS_FS_I(inode);
821         unsigned long priv;
822
823         _enter("{{%llx:%llu}},{%lx}",
824                vnode->fid.vid, vnode->fid.vnode, vmf->page->index);
825
826         sb_start_pagefault(inode->i_sb);
827
828         /* Wait for the page to be written to the cache before we allow it to
829          * be modified.  We then assume the entire page will need writing back.
830          */
831 #ifdef CONFIG_AFS_FSCACHE
832         fscache_wait_on_page_write(vnode->cache, vmf->page);
833 #endif
834
835         if (PageWriteback(vmf->page) &&
836             wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
837                 return VM_FAULT_RETRY;
838
839         if (lock_page_killable(vmf->page) < 0)
840                 return VM_FAULT_RETRY;
841
842         /* We mustn't change page->private until writeback is complete as that
843          * details the portion of the page we need to write back and we might
844          * need to redirty the page if there's a problem.
845          */
846         wait_on_page_writeback(vmf->page);
847
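        /* Mark the entire page as dirty in page->private since the fault
         * handler cannot tell how much of it the caller will actually modify.
         */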
848         priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
849         priv |= 0; /* From */
850         trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
851                              vmf->page->index, priv);
852         SetPagePrivate(vmf->page);
853         set_page_private(vmf->page, priv);
854         file_update_time(file);
855
856         sb_end_pagefault(inode->i_sb);
857         return VM_FAULT_LOCKED;
858 }
859
860 /*
861  * Prune the keys cached for writeback.  Note that vnode->wb_lock is taken here, so the caller must not hold it.
862  */
863 void afs_prune_wb_keys(struct afs_vnode *vnode)
864 {
865         LIST_HEAD(graveyard);
866         struct afs_wb_key *wbk, *tmp;
867
868         /* Discard unused keys */
869         spin_lock(&vnode->wb_lock);
870
871         if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
872             !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
873                 list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
874                         if (refcount_read(&wbk->usage) == 1)
875                                 list_move(&wbk->vnode_link, &graveyard);
876                 }
877         }
878
879         spin_unlock(&vnode->wb_lock);
880
881         while (!list_empty(&graveyard)) {
882                 wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
883                 list_del(&wbk->vnode_link);
884                 afs_put_wb_key(wbk);
885         }
886 }
887
888 /*
889  * Clean up a page during invalidation.
890  */
891 int afs_launder_page(struct page *page)
892 {
893         struct address_space *mapping = page->mapping;
894         struct afs_vnode *vnode = AFS_FS_I(mapping->host);
895         unsigned long priv;
896         unsigned int f, t;
897         int ret = 0;
898
899         _enter("{%lx}", page->index);
900
901         priv = page_private(page);
902         if (clear_page_dirty_for_io(page)) {
903                 f = 0;
904                 t = PAGE_SIZE;
905                 if (PagePrivate(page)) {
906                         f = priv & AFS_PRIV_MAX;
907                         t = priv >> AFS_PRIV_SHIFT;
908                 }
909
910                 trace_afs_page_dirty(vnode, tracepoint_string("launder"),
911                                      page->index, priv);
912                 ret = afs_store_data(mapping, page->index, page->index, f, t);
913         }
914
915         trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
916                              page->index, priv);
917         set_page_private(page, 0);
918         ClearPagePrivate(page);
919
920 #ifdef CONFIG_AFS_FSCACHE
921         if (PageFsCache(page)) {
922                 fscache_wait_on_page_write(vnode->cache, page);
923                 fscache_uncache_page(vnode->cache, page);
924         }
925 #endif
926         return ret;
927 }