afs: Fix application of status and callback to be under same lock
fs/afs/write.c
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

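/*
 * Illustrative note: the functions in this file are not called directly;
 * the VFS reaches them through the address_space and file operation tables
 * (see fs/afs/file.c and fs/afs/internal.h), wired up along these lines:
 *
 *	.set_page_dirty	= afs_set_page_dirty,
 *	.write_begin	= afs_write_begin,
 *	.write_end	= afs_write_end,
 *	.writepage	= afs_writepage,
 *	.writepages	= afs_writepages,
 *	.launder_page	= afs_launder_page,
 */
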
/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_read *req;
	size_t p;
	void *data;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

	if (pos >= vnode->vfs_inode.i_size) {
		p = pos & ~PAGE_MASK;
		ASSERTCMP(p + len, <=, PAGE_SIZE);
		data = kmap(page);
		memset(data + p, 0, len);
		kunmap(page);
		return 0;
	}

	req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
		      GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->pages = req->array;
	req->pages[0] = page;
	get_page(page);

	ret = afs_fetch_data(vnode, key, req);
	afs_put_read(req);
	if (ret < 0) {
		if (ret == -ENOENT) {
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **pagep, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned f, from = pos & (PAGE_SIZE - 1);
	unsigned t, to = from + len;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	_enter("{%llx:%llu},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

	/* We want to store information about how much of a page is altered in
	 * page->private.
	 */
	BUILD_BUG_ON(PAGE_SIZE > 32768 && sizeof(page->private) < 8);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (!PageUptodate(page) && len != PAGE_SIZE) {
		ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

	/* page won't leak in error case: it eventually gets cleaned off LRU */
	*pagep = page;

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	t = f = 0;
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = priv & AFS_PRIV_MAX;
		t = priv >> AFS_PRIV_SHIFT;
		ASSERTCMP(f, <=, t);
	}

	if (f != t) {
		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
					     page->index, priv);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
		if (from < f)
			f = from;
		if (to > t)
			t = to;
	} else {
		f = from;
		t = to;
	}

	priv = (unsigned long)t << AFS_PRIV_SHIFT;
	priv |= f;
	trace_afs_page_dirty(vnode, tracepoint_string("begin"),
			     page->index, priv);
	SetPagePrivate(page);
	set_page_private(page, priv);
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);
	if (ret < 0)
		goto error;

	ret = lock_page_killable(page);
	if (ret < 0)
		goto error;
	goto try_again;

error:
	/* Drop the ref taken by grab_cache_page_write_begin() so that the
	 * page isn't leaked on failure (write_one_page() has already
	 * unlocked it).
	 */
	put_page(page);
	_leave(" = %d", ret);
	return ret;
}
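
/*
 * Illustrative sketch only: the dirty-region bookkeeping above packs the
 * bottom of the dirty region ("from") into the low bits of page->private
 * and the top ("to") into the high bits.  Assuming the AFS_PRIV_SHIFT and
 * AFS_PRIV_MAX definitions from internal.h, hypothetical helpers that are
 * equivalent to the open-coded packing would look like this:
 */
static inline unsigned long afs_pack_dirty(unsigned int from, unsigned int to)
{
	/* e.g. from = 0x100, to = 0x300 packs as (0x300 << AFS_PRIV_SHIFT) | 0x100 */
	return ((unsigned long)to << AFS_PRIV_SHIFT) | from;
}

static inline void afs_unpack_dirty(unsigned long priv,
				    unsigned int *from, unsigned int *to)
{
	*from = priv & AFS_PRIV_MAX;
	*to = priv >> AFS_PRIV_SHIFT;
}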

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct key *key = afs_file_key(file);
	loff_t i_size, maybe_i_size;
	int ret;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	maybe_i_size = pos + copied;

	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		spin_lock(&vnode->wb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		spin_unlock(&vnode->wb_lock);
	}

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* Try and load any missing data from the server.  The
			 * unmarshalling routine will take care of clearing any
			 * bits that are beyond the EOF.
			 */
			ret = afs_fill_page(vnode, key, pos + copied,
					    len - copied, page);
			if (ret < 0)
				goto out;
		}
		SetPageUptodate(page);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	ret = copied;

out:
	unlock_page(page);
	put_page(page);
	return ret;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];
			ClearPageUptodate(page);
			SetPageError(page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
			lock_page(page);
			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("redirty %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}
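
/*
 * Illustrative sketch only: afs_kill_pages(), afs_redirty_pages() and
 * afs_pages_written_back() all walk [first, last] in pagevec-sized
 * batches.  The clamp each of them open-codes is equivalent to this
 * hypothetical helper (min_t() comes from <linux/kernel.h>):
 */
static inline unsigned afs_batch_count(pgoff_t first, pgoff_t last)
{
	/* pages still to visit, capped at one pagevec's worth */
	return min_t(pgoff_t, last - first + 1, PAGEVEC_SIZE);
}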

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode,
				   pgoff_t first, pgoff_t last)
{
	struct pagevec pv;
	unsigned long priv;
	unsigned count, loop;

	_enter("{%llx:%llu},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("done %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			priv = page_private(pv.pages[loop]);
			trace_afs_page_dirty(vnode, tracepoint_string("clear"),
					     pv.pages[loop]->index, priv);
			set_page_private(pv.pages[loop], 0);
			end_page_writeback(pv.pages[loop]);
		}
		first += count;
		__pagevec_release(&pv);
	} while (first <= last);

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * write to a file
 */
static int afs_store_data(struct address_space *mapping,
			  pgoff_t first, pgoff_t last,
			  unsigned offset, unsigned to)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_fs_cursor fc;
	struct afs_status_cb *scb;
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	_enter("%s{%llx:%llu.%u},%lx,%lx,%x,%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       first, last, offset, to);

	scb = kzalloc(sizeof(struct afs_status_cb), GFP_NOFS);
	if (!scb)
		return -ENOMEM;

	spin_lock(&vnode->wb_lock);
	p = vnode->wb_keys.next;

	/* Iterate through the list looking for a valid key to use. */
try_next_key:
	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0)
			goto found_key;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	/* Don't put wbk here: it is either NULL or a borrowed pointer whose
	 * reference we never took (or already dropped on a retry pass), so
	 * there is nothing to release.
	 */
	kfree(scb);
	_leave(" = %d [no keys]", ret);
	return ret;

found_key:
	refcount_inc(&wbk->usage);
	spin_unlock(&vnode->wb_lock);

	_debug("USE WB KEY %u", key_serial(wbk->key));

	ret = -ERESTARTSYS;
	if (afs_begin_vnode_operation(&fc, vnode, wbk->key, false)) {
		afs_dataversion_t data_version = vnode->status.data_version + 1;

		while (afs_select_fileserver(&fc)) {
			fc.cb_break = afs_calc_vnode_cb_break(vnode);
			afs_fs_store_data(&fc, mapping, first, last, offset, to, scb);
		}

		afs_check_for_remote_deletion(&fc, vnode);
		afs_vnode_commit_status(&fc, vnode, fc.cb_break,
					&data_version, scb);
		if (fc.ac.error == 0)
			afs_pages_written_back(vnode, first, last);
		ret = afs_end_vnode_operation(&fc);
	}

	switch (ret) {
	case 0:
		afs_stat_v(vnode, n_stores);
		atomic_long_add((last * PAGE_SIZE + to) -
				(first * PAGE_SIZE + offset),
				&afs_v2net(vnode)->n_store_bytes);
		break;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");
		spin_lock(&vnode->wb_lock);
		p = wbk->vnode_link.next;
		afs_put_wb_key(wbk);
		goto try_next_key;
	}

	afs_put_wb_key(wbk);
	kfree(scb);
	_leave(" = %d", ret);
	return ret;
}
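
/*
 * Illustrative note: the byte count added to n_store_bytes in
 * afs_store_data() is the span from (first page, start offset) to
 * (last page, end offset).  For example, with 4KB pages, first = 2,
 * offset = 0x100, last = 3 and to = 0x300 gives
 * (3 * 4096 + 0x300) - (2 * 4096 + 0x100) = 4608 bytes.  A hypothetical
 * helper computing the same span:
 */
static inline loff_t afs_store_span(pgoff_t first, pgoff_t last,
				    unsigned offset, unsigned to)
{
	return ((loff_t)last * PAGE_SIZE + to) -
	       ((loff_t)first * PAGE_SIZE + offset);
}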

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static int afs_write_back_from_locked_page(struct address_space *mapping,
					   struct writeback_control *wbc,
					   struct page *primary_page,
					   pgoff_t final_page)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct page *pages[8], *page;
	unsigned long count, priv;
	unsigned n, offset, to, f, t;
	pgoff_t start, first, last;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (test_set_page_writeback(primary_page))
		BUG();

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	start = primary_page->index;
	priv = page_private(primary_page);
	offset = priv & AFS_PRIV_MAX;
	to = priv >> AFS_PRIV_SHIFT;
	trace_afs_page_dirty(vnode, tracepoint_string("store"),
			     primary_page->index, priv);

	WARN_ON(offset == to);
	if (offset == to)
		trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
				     primary_page->index, priv);

	if (start >= final_page ||
	    (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
		goto no_more;

	start++;
	do {
		_debug("more %lx [%lx]", start, count);
		/* Clamp in pgoff_t arithmetic to avoid truncation, and only
		 * ask for that many pages so that we don't take references on
		 * pages beyond final_page just to drop them again.
		 */
		n = min_t(pgoff_t, final_page - start + 1, ARRAY_SIZE(pages));
		n = find_get_pages_contig(mapping, start, n, pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			page = pages[loop];
			if (to != PAGE_SIZE &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
				break;
			if (page->index > final_page)
				break;
			if (!trylock_page(page))
				break;
			if (!PageDirty(page) || PageWriteback(page)) {
				unlock_page(page);
				break;
			}

			priv = page_private(page);
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
			if (f != 0 &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
				unlock_page(page);
				break;
			}
			to = t;

			trace_afs_page_dirty(vnode, tracepoint_string("store+"),
					     page->index, priv);

			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= final_page && count < 65536);

no_more:
	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(primary_page);

	first = primary_page->index;
	last = first + count - 1;

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

	ret = afs_store_data(mapping, first, last, offset, to);
	switch (ret) {
	case 0:
		ret = count;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		/* Fall through */
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, first, last);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}
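
/*
 * Illustrative sketch only: the page-coalescing loop in
 * afs_write_back_from_locked_page() stops extending the run at the first
 * candidate page that fails checks equivalent to this hypothetical
 * predicate (the caller is assumed to have locked the page):
 */
static inline bool afs_page_extends_run(struct afs_vnode *vnode,
					struct page *page, pgoff_t final_page)
{
	if (page->index > final_page)
		return false;
	if (!PageDirty(page) || PageWriteback(page))
		return false;
	/* Unless the file is being filled locally, the page's dirty region
	 * must start at offset 0 to be contiguous with the run so far.
	 */
	if ((page_private(page) & AFS_PRIV_MAX) != 0 &&
	    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
		return false;
	return true;
}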

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	_enter("{%lx},", page->index);

	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      wbc->range_end >> PAGE_SHIFT);
	if (ret < 0) {
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		n = find_get_pages_range_tag(mapping, &index, end,
					PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		/*
		 * at this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		ret = lock_page_killable(page);
		if (ret < 0) {
			put_page(page);
			_leave(" = %d", ret);
			return ret;
		}

		if (page->mapping != mapping || !PageDirty(page)) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (PageWriteback(page)) {
			unlock_page(page);
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

		if (!clear_page_dirty_for_io(page))
			BUG();
		ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	pgoff_t start, end, next;
	int ret;

	_enter("");

	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%llx:%llu},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;

	_enter("{{%llx:%llu}},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	fscache_wait_on_page_write(vnode->cache, vmf->page);
#endif

	if (PageWriteback(vmf->page) &&
	    wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
		goto retry;

	if (lock_page_killable(vmf->page) < 0)
		goto retry;

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	wait_on_page_writeback(vmf->page);

	priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
	priv |= 0; /* From */
	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
			     vmf->page->index, priv);
	SetPagePrivate(vmf->page);
	set_page_private(vmf->page, priv);

	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;

retry:
	/* Re-enable filesystem freezing before asking the fault handler to
	 * retry; returning without this would leave the freeze count raised.
	 */
	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_RETRY;
}
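
/*
 * Illustrative note: the priv value set in afs_page_mkwrite() marks the
 * whole page as dirty.  Decoded with the scheme used in afs_write_begin(),
 * it yields from = 0 and to = PAGE_SIZE:
 *
 *	f = priv & AFS_PRIV_MAX;	=> 0
 *	t = priv >> AFS_PRIV_SHIFT;	=> PAGE_SIZE
 */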

/*
 * Prune the keys cached for writeback.  vnode->wb_lock is taken here, so
 * the caller must not hold it.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		f = 0;
		t = PAGE_SIZE;
		if (PagePrivate(page)) {
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
		}

		trace_afs_page_dirty(vnode, tracepoint_string("launder"),
				     page->index, priv);
		/* Note the argument order: afs_store_data() takes the start
		 * offset (f) before the end offset (t).
		 */
		ret = afs_store_data(mapping, page->index, page->index, f, t);
	}

	trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
			     page->index, priv);
	set_page_private(page, 0);
	ClearPagePrivate(page);

#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page)) {
		fscache_wait_on_page_write(vnode->cache, page);
		fscache_uncache_page(vnode->cache, page);
	}
#endif
	return ret;
}