fs/fscache/page.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Cache page management and data I/O routines
3  *
4  * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
5  * Written by David Howells (dhowells@redhat.com)
6  */
7
8 #define FSCACHE_DEBUG_LEVEL PAGE
9 #include <linux/module.h>
10 #include <linux/fscache-cache.h>
11 #include <linux/buffer_head.h>
12 #include <linux/pagevec.h>
13 #include <linux/slab.h>
14 #include "internal.h"
15
16 /*
17  * check to see if a page is being written to the cache
18  */
19 bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
20 {
21         void *val;
22
23         rcu_read_lock();
24         val = radix_tree_lookup(&cookie->stores, page->index);
25         rcu_read_unlock();
26         trace_fscache_check_page(cookie, page, val, 0);
27
28         return val != NULL;
29 }
30 EXPORT_SYMBOL(__fscache_check_page_write);
31
32 /*
33  * wait for a page to finish being written to the cache
34  */
35 void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
36 {
37         wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);
38
39         trace_fscache_page(cookie, page, fscache_page_write_wait);
40
41         wait_event(*wq, !__fscache_check_page_write(cookie, page));
42 }
43 EXPORT_SYMBOL(__fscache_wait_on_page_write);
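
/*
 * Usage sketch: how a hypothetical netfs ("myfs") might reach the two
 * helpers above, assuming the fscache_check_page_write() and
 * fscache_wait_on_page_write() wrappers declared in linux/fscache.h.
 * The myfs_i() cookie accessor is illustrative only.
 */
#if 0
static void myfs_wait_on_page_write(struct inode *inode, struct page *page)
{
        struct fscache_cookie *cookie = myfs_i(inode)->fscache;

        /* only sleep if a store to the cache is actually in flight */
        if (PageFsCache(page) && fscache_check_page_write(cookie, page))
                fscache_wait_on_page_write(cookie, page);
}
#endif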
44
45 /*
46  * wait for a page to finish being written to the cache. Put a timeout here
47  * since we might be called recursively via a parent fs.
48  */
49 static
50 bool release_page_wait_timeout(struct fscache_cookie *cookie, struct page *page)
51 {
52         wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);
53
54         return wait_event_timeout(*wq, !__fscache_check_page_write(cookie, page),
55                                   HZ);
56 }
57
58 /*
59  * decide whether a page can be released, possibly by cancelling a store to it
60  * - we're allowed to sleep if __GFP_DIRECT_RECLAIM is flagged
61  */
62 bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
63                                   struct page *page,
64                                   gfp_t gfp)
65 {
66         struct page *xpage;
67         void *val;
68
69         _enter("%p,%p,%x", cookie, page, gfp);
70
71         trace_fscache_page(cookie, page, fscache_page_maybe_release);
72
73 try_again:
74         rcu_read_lock();
75         val = radix_tree_lookup(&cookie->stores, page->index);
76         if (!val) {
77                 rcu_read_unlock();
78                 fscache_stat(&fscache_n_store_vmscan_not_storing);
79                 __fscache_uncache_page(cookie, page);
80                 return true;
81         }
82
83         /* see if the page is actually undergoing storage - if so we can't get
84          * rid of it till the cache has finished with it */
85         if (radix_tree_tag_get(&cookie->stores, page->index,
86                                FSCACHE_COOKIE_STORING_TAG)) {
87                 rcu_read_unlock();
88                 goto page_busy;
89         }
90
91         /* the page is pending storage, so we attempt to cancel the store and
92          * discard the store request so that the page can be reclaimed */
93         spin_lock(&cookie->stores_lock);
94         rcu_read_unlock();
95
96         if (radix_tree_tag_get(&cookie->stores, page->index,
97                                FSCACHE_COOKIE_STORING_TAG)) {
98                 /* the page started to undergo storage whilst we were looking,
99                  * so now we can only wait or return */
100                 spin_unlock(&cookie->stores_lock);
101                 goto page_busy;
102         }
103
104         xpage = radix_tree_delete(&cookie->stores, page->index);
105         trace_fscache_page(cookie, page, fscache_page_radix_delete);
106         spin_unlock(&cookie->stores_lock);
107
108         if (xpage) {
109                 fscache_stat(&fscache_n_store_vmscan_cancelled);
110                 fscache_stat(&fscache_n_store_radix_deletes);
111                 ASSERTCMP(xpage, ==, page);
112         } else {
113                 fscache_stat(&fscache_n_store_vmscan_gone);
114         }
115
116         wake_up_bit(&cookie->flags, 0);
117         trace_fscache_wake_cookie(cookie);
118         if (xpage)
119                 put_page(xpage);
120         __fscache_uncache_page(cookie, page);
121         return true;
122
123 page_busy:
124         /* We will wait here if we're allowed to, but that could deadlock the
125          * allocator as the work threads writing to the cache may all end up
126          * sleeping on memory allocation, so we may need to impose a timeout
127          * too. */
128         if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS)) {
129                 fscache_stat(&fscache_n_store_vmscan_busy);
130                 return false;
131         }
132
133         fscache_stat(&fscache_n_store_vmscan_wait);
134         if (!release_page_wait_timeout(cookie, page))
135                 _debug("fscache writeout timeout page: %p{%lx}",
136                         page, page->index);
137
138         gfp &= ~__GFP_DIRECT_RECLAIM;
139         goto try_again;
140 }
141 EXPORT_SYMBOL(__fscache_maybe_release_page);
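
/*
 * Usage sketch: fscache_maybe_release_page() (the wrapper assumed to be in
 * linux/fscache.h) is meant to be called from a netfs's ->releasepage() so
 * that a store still pending for the page can be cancelled before the VM
 * reclaims it.  The "myfs" names are hypothetical.
 */
#if 0
static int myfs_releasepage(struct page *page, gfp_t gfp)
{
        struct fscache_cookie *cookie = myfs_i(page->mapping->host)->fscache;

        /* false means the page is still busy in the cache and must be kept */
        if (PageFsCache(page) &&
            !fscache_maybe_release_page(cookie, page, gfp))
                return 0;

        return 1;
}
#endif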
142
143 /*
144  * note that a page has finished being written to the cache
145  */
146 static void fscache_end_page_write(struct fscache_object *object,
147                                    struct page *page)
148 {
149         struct fscache_cookie *cookie;
150         struct page *xpage = NULL, *val;
151
152         spin_lock(&object->lock);
153         cookie = object->cookie;
154         if (cookie) {
155                 /* delete the page from the tree if it is now no longer
156                  * pending */
157                 spin_lock(&cookie->stores_lock);
158                 radix_tree_tag_clear(&cookie->stores, page->index,
159                                      FSCACHE_COOKIE_STORING_TAG);
160                 trace_fscache_page(cookie, page, fscache_page_radix_clear_store);
161                 if (!radix_tree_tag_get(&cookie->stores, page->index,
162                                         FSCACHE_COOKIE_PENDING_TAG)) {
163                         fscache_stat(&fscache_n_store_radix_deletes);
164                         xpage = radix_tree_delete(&cookie->stores, page->index);
165                         trace_fscache_page(cookie, page, fscache_page_radix_delete);
166                         trace_fscache_page(cookie, page, fscache_page_write_end);
167
168                         val = radix_tree_lookup(&cookie->stores, page->index);
169                         trace_fscache_check_page(cookie, page, val, 1);
170                 } else {
171                         trace_fscache_page(cookie, page, fscache_page_write_end_pend);
172                 }
173                 spin_unlock(&cookie->stores_lock);
174                 wake_up_bit(&cookie->flags, 0);
175                 trace_fscache_wake_cookie(cookie);
176         } else {
177                 trace_fscache_page(cookie, page, fscache_page_write_end_noc);
178         }
179         spin_unlock(&object->lock);
180         if (xpage)
181                 put_page(xpage);
182 }
183
184 /*
185  * actually apply the changed attributes to a cache object
186  */
187 static void fscache_attr_changed_op(struct fscache_operation *op)
188 {
189         struct fscache_object *object = op->object;
190         int ret;
191
192         _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
193
194         fscache_stat(&fscache_n_attr_changed_calls);
195
196         if (fscache_object_is_active(object)) {
197                 fscache_stat(&fscache_n_cop_attr_changed);
198                 ret = object->cache->ops->attr_changed(object);
199                 fscache_stat_d(&fscache_n_cop_attr_changed);
200                 if (ret < 0)
201                         fscache_abort_object(object);
202                 fscache_op_complete(op, ret < 0);
203         } else {
204                 fscache_op_complete(op, true);
205         }
206
207         _leave("");
208 }
209
210 /*
211  * notification that the attributes on an object have changed
212  */
213 int __fscache_attr_changed(struct fscache_cookie *cookie)
214 {
215         struct fscache_operation *op;
216         struct fscache_object *object;
217         bool wake_cookie = false;
218
219         _enter("%p", cookie);
220
221         ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
222
223         fscache_stat(&fscache_n_attr_changed);
224
225         op = kzalloc(sizeof(*op), GFP_KERNEL);
226         if (!op) {
227                 fscache_stat(&fscache_n_attr_changed_nomem);
228                 _leave(" = -ENOMEM");
229                 return -ENOMEM;
230         }
231
232         fscache_operation_init(cookie, op, fscache_attr_changed_op, NULL, NULL);
233         trace_fscache_page_op(cookie, NULL, op, fscache_page_op_attr_changed);
234         op->flags = FSCACHE_OP_ASYNC |
235                 (1 << FSCACHE_OP_EXCLUSIVE) |
236                 (1 << FSCACHE_OP_UNUSE_COOKIE);
237
238         spin_lock(&cookie->lock);
239
240         if (!fscache_cookie_enabled(cookie) ||
241             hlist_empty(&cookie->backing_objects))
242                 goto nobufs;
243         object = hlist_entry(cookie->backing_objects.first,
244                              struct fscache_object, cookie_link);
245
246         __fscache_use_cookie(cookie);
247         if (fscache_submit_exclusive_op(object, op) < 0)
248                 goto nobufs_dec;
249         spin_unlock(&cookie->lock);
250         fscache_stat(&fscache_n_attr_changed_ok);
251         fscache_put_operation(op);
252         _leave(" = 0");
253         return 0;
254
255 nobufs_dec:
256         wake_cookie = __fscache_unuse_cookie(cookie);
257 nobufs:
258         spin_unlock(&cookie->lock);
259         fscache_put_operation(op);
260         if (wake_cookie)
261                 __fscache_wake_unused_cookie(cookie);
262         fscache_stat(&fscache_n_attr_changed_nobufs);
263         _leave(" = %d", -ENOBUFS);
264         return -ENOBUFS;
265 }
266 EXPORT_SYMBOL(__fscache_attr_changed);
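
/*
 * Usage sketch: a netfs would call the fscache_attr_changed() wrapper
 * (assumed to be in linux/fscache.h) after changing an attribute the cache
 * cares about, typically the file size.  myfs_do_truncate() and myfs_i()
 * are hypothetical helpers.
 */
#if 0
static int myfs_truncate(struct inode *inode, loff_t newsize)
{
        int ret = myfs_do_truncate(inode, newsize);

        /* let the cache resize or discard its backing store to match */
        if (ret == 0)
                fscache_attr_changed(myfs_i(inode)->fscache);
        return ret;
}
#endif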
267
268 /*
269  * Handle cancellation of a pending retrieval op
270  */
271 static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
272 {
273         struct fscache_retrieval *op =
274                 container_of(_op, struct fscache_retrieval, op);
275
276         atomic_set(&op->n_pages, 0);
277 }
278
279 /*
280  * release a retrieval op reference
281  */
282 static void fscache_release_retrieval_op(struct fscache_operation *_op)
283 {
284         struct fscache_retrieval *op =
285                 container_of(_op, struct fscache_retrieval, op);
286
287         _enter("{OP%x}", op->op.debug_id);
288
289         ASSERTIFCMP(op->op.state != FSCACHE_OP_ST_INITIALISED,
290                     atomic_read(&op->n_pages), ==, 0);
291
292         fscache_hist(fscache_retrieval_histogram, op->start_time);
293         if (op->context)
294                 fscache_put_context(op->cookie, op->context);
295
296         _leave("");
297 }
298
299 /*
300  * allocate a retrieval op
301  */
302 static struct fscache_retrieval *fscache_alloc_retrieval(
303         struct fscache_cookie *cookie,
304         struct address_space *mapping,
305         fscache_rw_complete_t end_io_func,
306         void *context)
307 {
308         struct fscache_retrieval *op;
309
310         /* allocate a retrieval operation and attempt to submit it */
311         op = kzalloc(sizeof(*op), GFP_NOIO);
312         if (!op) {
313                 fscache_stat(&fscache_n_retrievals_nomem);
314                 return NULL;
315         }
316
317         fscache_operation_init(cookie, &op->op, NULL,
318                                fscache_do_cancel_retrieval,
319                                fscache_release_retrieval_op);
320         op->op.flags    = FSCACHE_OP_MYTHREAD |
321                 (1UL << FSCACHE_OP_WAITING) |
322                 (1UL << FSCACHE_OP_UNUSE_COOKIE);
323         op->cookie      = cookie;
324         op->mapping     = mapping;
325         op->end_io_func = end_io_func;
326         op->context     = context;
327         op->start_time  = jiffies;
328         INIT_LIST_HEAD(&op->to_do);
329
330         /* Pin the netfs read context in case we need to do the actual netfs
331          * read because we've encountered a cache read failure.
332          */
333         if (context)
334                 fscache_get_context(op->cookie, context);
335         return op;
336 }
337
338 /*
339  * wait for a deferred lookup to complete
340  */
341 int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
342 {
343         unsigned long jif;
344
345         _enter("");
346
347         if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
348                 _leave(" = 0 [imm]");
349                 return 0;
350         }
351
352         fscache_stat(&fscache_n_retrievals_wait);
353
354         jif = jiffies;
355         if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
356                         TASK_INTERRUPTIBLE) != 0) {
357                 fscache_stat(&fscache_n_retrievals_intr);
358                 _leave(" = -ERESTARTSYS");
359                 return -ERESTARTSYS;
360         }
361
362         ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));
363
364         smp_rmb();
365         fscache_hist(fscache_retrieval_delay_histogram, jif);
366         _leave(" = 0 [dly]");
367         return 0;
368 }
369
370 /*
371  * wait for an object to become active (or dead)
372  */
373 int fscache_wait_for_operation_activation(struct fscache_object *object,
374                                           struct fscache_operation *op,
375                                           atomic_t *stat_op_waits,
376                                           atomic_t *stat_object_dead)
377 {
378         int ret;
379
380         if (!test_bit(FSCACHE_OP_WAITING, &op->flags))
381                 goto check_if_dead;
382
383         _debug(">>> WT");
384         if (stat_op_waits)
385                 fscache_stat(stat_op_waits);
386         if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
387                         TASK_INTERRUPTIBLE) != 0) {
388                 trace_fscache_op(object->cookie, op, fscache_op_signal);
389                 ret = fscache_cancel_op(op, false);
390                 if (ret == 0)
391                         return -ERESTARTSYS;
392
393                 /* it's been removed from the pending queue by another party,
394                  * so we should get to run shortly */
395                 wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
396                             TASK_UNINTERRUPTIBLE);
397         }
398         _debug("<<< GO");
399
400 check_if_dead:
401         if (op->state == FSCACHE_OP_ST_CANCELLED) {
402                 if (stat_object_dead)
403                         fscache_stat(stat_object_dead);
404                 _leave(" = -ENOBUFS [cancelled]");
405                 return -ENOBUFS;
406         }
407         if (unlikely(fscache_object_is_dying(object) ||
408                      fscache_cache_is_broken(object))) {
409                 enum fscache_operation_state state = op->state;
410                 trace_fscache_op(object->cookie, op, fscache_op_signal);
411                 fscache_cancel_op(op, true);
412                 if (stat_object_dead)
413                         fscache_stat(stat_object_dead);
414                 _leave(" = -ENOBUFS [obj dead %d]", state);
415                 return -ENOBUFS;
416         }
417         return 0;
418 }
419
420 /*
421  * read a page from the cache or allocate a block in which to store it
422  * - we return:
423  *   -ENOMEM    - out of memory, nothing done
424  *   -ERESTARTSYS - interrupted
425  *   -ENOBUFS   - no backing object available in which to cache the block
426  *   -ENODATA   - no data available in the backing object for this block
427  *   0          - dispatched a read - it'll call end_io_func() when finished
428  */
429 int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
430                                  struct page *page,
431                                  fscache_rw_complete_t end_io_func,
432                                  void *context,
433                                  gfp_t gfp)
434 {
435         struct fscache_retrieval *op;
436         struct fscache_object *object;
437         bool wake_cookie = false;
438         int ret;
439
440         _enter("%p,%p,,,", cookie, page);
441
442         fscache_stat(&fscache_n_retrievals);
443
444         if (hlist_empty(&cookie->backing_objects))
445                 goto nobufs;
446
447         if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
448                 _leave(" = -ENOBUFS [invalidating]");
449                 return -ENOBUFS;
450         }
451
452         ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
453         ASSERTCMP(page, !=, NULL);
454
455         if (fscache_wait_for_deferred_lookup(cookie) < 0)
456                 return -ERESTARTSYS;
457
458         op = fscache_alloc_retrieval(cookie, page->mapping,
459                                      end_io_func, context);
460         if (!op) {
461                 _leave(" = -ENOMEM");
462                 return -ENOMEM;
463         }
464         atomic_set(&op->n_pages, 1);
465         trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_retr_one);
466
467         spin_lock(&cookie->lock);
468
469         if (!fscache_cookie_enabled(cookie) ||
470             hlist_empty(&cookie->backing_objects))
471                 goto nobufs_unlock;
472         object = hlist_entry(cookie->backing_objects.first,
473                              struct fscache_object, cookie_link);
474
475         ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags));
476
477         __fscache_use_cookie(cookie);
478         atomic_inc(&object->n_reads);
479         __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
480
481         if (fscache_submit_op(object, &op->op) < 0)
482                 goto nobufs_unlock_dec;
483         spin_unlock(&cookie->lock);
484
485         fscache_stat(&fscache_n_retrieval_ops);
486
487         /* we wait for the operation to become active, and then process it
488          * *here*, in this thread, and not in the thread pool */
489         ret = fscache_wait_for_operation_activation(
490                 object, &op->op,
491                 __fscache_stat(&fscache_n_retrieval_op_waits),
492                 __fscache_stat(&fscache_n_retrievals_object_dead));
493         if (ret < 0)
494                 goto error;
495
496         /* ask the cache to honour the operation */
497         if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
498                 fscache_stat(&fscache_n_cop_allocate_page);
499                 ret = object->cache->ops->allocate_page(op, page, gfp);
500                 fscache_stat_d(&fscache_n_cop_allocate_page);
501                 if (ret == 0)
502                         ret = -ENODATA;
503         } else {
504                 fscache_stat(&fscache_n_cop_read_or_alloc_page);
505                 ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
506                 fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
507         }
508
509 error:
510         if (ret == -ENOMEM)
511                 fscache_stat(&fscache_n_retrievals_nomem);
512         else if (ret == -ERESTARTSYS)
513                 fscache_stat(&fscache_n_retrievals_intr);
514         else if (ret == -ENODATA)
515                 fscache_stat(&fscache_n_retrievals_nodata);
516         else if (ret < 0)
517                 fscache_stat(&fscache_n_retrievals_nobufs);
518         else
519                 fscache_stat(&fscache_n_retrievals_ok);
520
521         fscache_put_retrieval(op);
522         _leave(" = %d", ret);
523         return ret;
524
525 nobufs_unlock_dec:
526         atomic_dec(&object->n_reads);
527         wake_cookie = __fscache_unuse_cookie(cookie);
528 nobufs_unlock:
529         spin_unlock(&cookie->lock);
530         if (wake_cookie)
531                 __fscache_wake_unused_cookie(cookie);
532         fscache_put_retrieval(op);
533 nobufs:
534         fscache_stat(&fscache_n_retrievals_nobufs);
535         _leave(" = -ENOBUFS");
536         return -ENOBUFS;
537 }
538 EXPORT_SYMBOL(__fscache_read_or_alloc_page);
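
/*
 * Usage sketch: a netfs ->readpage() trying the cache first, assuming the
 * fscache_read_or_alloc_page() wrapper from linux/fscache.h.  The end_io
 * handler runs when the cache I/O completes; on -ENODATA/-ENOBUFS the page
 * is untouched and the netfs reads it from the server instead.  All "myfs"
 * names are hypothetical.
 */
#if 0
static void myfs_readpage_from_fscache_complete(struct page *page,
                                                void *context, int error)
{
        /* on error a real netfs would normally reissue a server read here */
        if (!error)
                SetPageUptodate(page);
        unlock_page(page);
}

static int myfs_readpage(struct file *file, struct page *page)
{
        struct fscache_cookie *cookie = myfs_i(page->mapping->host)->fscache;
        int ret;

        ret = fscache_read_or_alloc_page(cookie, page,
                                         myfs_readpage_from_fscache_complete,
                                         NULL, GFP_KERNEL);
        if (ret == 0)           /* read dispatched to the cache */
                return 0;

        /* -ENODATA, -ENOBUFS etc.: fall back to the server */
        return myfs_readpage_from_server(file, page);
}
#endif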
539
540 /*
541  * read a list of pages from the cache or allocate blocks in which to store
542  * them
543  * - we return:
544  *   -ENOMEM    - out of memory, some pages may be being read
545  *   -ERESTARTSYS - interrupted, some pages may be being read
546  *   -ENOBUFS   - no backing object or space available in which to cache any
547  *                pages not being read
548  *   -ENODATA   - no data available in the backing object for some or all of
549  *                the pages
550  *   0          - dispatched a read on all pages
551  *
552  * end_io_func() will be called for each page read from the cache as it
553  * finishes being read
554  *
555  * any page for which a read is dispatched will be removed from *pages and
556  * deducted from *nr_pages
557  */
558 int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
559                                   struct address_space *mapping,
560                                   struct list_head *pages,
561                                   unsigned *nr_pages,
562                                   fscache_rw_complete_t end_io_func,
563                                   void *context,
564                                   gfp_t gfp)
565 {
566         struct fscache_retrieval *op;
567         struct fscache_object *object;
568         bool wake_cookie = false;
569         int ret;
570
571         _enter("%p,,%d,,,", cookie, *nr_pages);
572
573         fscache_stat(&fscache_n_retrievals);
574
575         if (hlist_empty(&cookie->backing_objects))
576                 goto nobufs;
577
578         if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
579                 _leave(" = -ENOBUFS [invalidating]");
580                 return -ENOBUFS;
581         }
582
583         ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
584         ASSERTCMP(*nr_pages, >, 0);
585         ASSERT(!list_empty(pages));
586
587         if (fscache_wait_for_deferred_lookup(cookie) < 0)
588                 return -ERESTARTSYS;
589
590         op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context);
591         if (!op)
592                 return -ENOMEM;
593         atomic_set(&op->n_pages, *nr_pages);
594         trace_fscache_page_op(cookie, NULL, &op->op, fscache_page_op_retr_multi);
595
596         spin_lock(&cookie->lock);
597
598         if (!fscache_cookie_enabled(cookie) ||
599             hlist_empty(&cookie->backing_objects))
600                 goto nobufs_unlock;
601         object = hlist_entry(cookie->backing_objects.first,
602                              struct fscache_object, cookie_link);
603
604         __fscache_use_cookie(cookie);
605         atomic_inc(&object->n_reads);
606         __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
607
608         if (fscache_submit_op(object, &op->op) < 0)
609                 goto nobufs_unlock_dec;
610         spin_unlock(&cookie->lock);
611
612         fscache_stat(&fscache_n_retrieval_ops);
613
614         /* we wait for the operation to become active, and then process it
615          * *here*, in this thread, and not in the thread pool */
616         ret = fscache_wait_for_operation_activation(
617                 object, &op->op,
618                 __fscache_stat(&fscache_n_retrieval_op_waits),
619                 __fscache_stat(&fscache_n_retrievals_object_dead));
620         if (ret < 0)
621                 goto error;
622
623         /* ask the cache to honour the operation */
624         if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
625                 fscache_stat(&fscache_n_cop_allocate_pages);
626                 ret = object->cache->ops->allocate_pages(
627                         op, pages, nr_pages, gfp);
628                 fscache_stat_d(&fscache_n_cop_allocate_pages);
629         } else {
630                 fscache_stat(&fscache_n_cop_read_or_alloc_pages);
631                 ret = object->cache->ops->read_or_alloc_pages(
632                         op, pages, nr_pages, gfp);
633                 fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
634         }
635
636 error:
637         if (ret == -ENOMEM)
638                 fscache_stat(&fscache_n_retrievals_nomem);
639         else if (ret == -ERESTARTSYS)
640                 fscache_stat(&fscache_n_retrievals_intr);
641         else if (ret == -ENODATA)
642                 fscache_stat(&fscache_n_retrievals_nodata);
643         else if (ret < 0)
644                 fscache_stat(&fscache_n_retrievals_nobufs);
645         else
646                 fscache_stat(&fscache_n_retrievals_ok);
647
648         fscache_put_retrieval(op);
649         _leave(" = %d", ret);
650         return ret;
651
652 nobufs_unlock_dec:
653         atomic_dec(&object->n_reads);
654         wake_cookie = __fscache_unuse_cookie(cookie);
655 nobufs_unlock:
656         spin_unlock(&cookie->lock);
657         fscache_put_retrieval(op);
658         if (wake_cookie)
659                 __fscache_wake_unused_cookie(cookie);
660 nobufs:
661         fscache_stat(&fscache_n_retrievals_nobufs);
662         _leave(" = -ENOBUFS");
663         return -ENOBUFS;
664 }
665 EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
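
/*
 * Usage sketch: a netfs ->readpages() trying the cache first, assuming the
 * fscache_read_or_alloc_pages() and fscache_readpages_cancel() wrappers
 * from linux/fscache.h and reusing the completion handler sketched after
 * __fscache_read_or_alloc_page() above.  All "myfs" names are hypothetical.
 */
#if 0
static int myfs_readpages(struct file *file, struct address_space *mapping,
                          struct list_head *pages, unsigned nr_pages)
{
        struct fscache_cookie *cookie = myfs_i(mapping->host)->fscache;
        int ret;

        ret = fscache_read_or_alloc_pages(cookie, mapping, pages, &nr_pages,
                                          myfs_readpage_from_fscache_complete,
                                          NULL, GFP_KERNEL);
        if (ret == 0)           /* every page was dispatched to the cache */
                return 0;

        /* read whatever is left on the list from the server; if that fails,
         * strip the PG_fscache marks left on any allocated-but-unread pages */
        ret = myfs_readpages_from_server(file, mapping, pages, nr_pages);
        if (ret < 0)
                fscache_readpages_cancel(cookie, pages);
        return ret;
}
#endif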
666
667 /*
668  * allocate a block in the cache on which to store a page
669  * - we return:
670  *   -ENOMEM    - out of memory, nothing done
671  *   -ERESTARTSYS - interrupted
672  *   -ENOBUFS   - no backing object available in which to cache the block
673  *   0          - block allocated
674  */
675 int __fscache_alloc_page(struct fscache_cookie *cookie,
676                          struct page *page,
677                          gfp_t gfp)
678 {
679         struct fscache_retrieval *op;
680         struct fscache_object *object;
681         bool wake_cookie = false;
682         int ret;
683
684         _enter("%p,%p,,,", cookie, page);
685
686         fscache_stat(&fscache_n_allocs);
687
688         if (hlist_empty(&cookie->backing_objects))
689                 goto nobufs;
690
691         ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
692         ASSERTCMP(page, !=, NULL);
693
694         if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
695                 _leave(" = -ENOBUFS [invalidating]");
696                 return -ENOBUFS;
697         }
698
699         if (fscache_wait_for_deferred_lookup(cookie) < 0)
700                 return -ERESTARTSYS;
701
702         op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL);
703         if (!op)
704                 return -ENOMEM;
705         atomic_set(&op->n_pages, 1);
706         trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_alloc_one);
707
708         spin_lock(&cookie->lock);
709
710         if (!fscache_cookie_enabled(cookie) ||
711             hlist_empty(&cookie->backing_objects))
712                 goto nobufs_unlock;
713         object = hlist_entry(cookie->backing_objects.first,
714                              struct fscache_object, cookie_link);
715
716         __fscache_use_cookie(cookie);
717         if (fscache_submit_op(object, &op->op) < 0)
718                 goto nobufs_unlock_dec;
719         spin_unlock(&cookie->lock);
720
721         fscache_stat(&fscache_n_alloc_ops);
722
723         ret = fscache_wait_for_operation_activation(
724                 object, &op->op,
725                 __fscache_stat(&fscache_n_alloc_op_waits),
726                 __fscache_stat(&fscache_n_allocs_object_dead));
727         if (ret < 0)
728                 goto error;
729
730         /* ask the cache to honour the operation */
731         fscache_stat(&fscache_n_cop_allocate_page);
732         ret = object->cache->ops->allocate_page(op, page, gfp);
733         fscache_stat_d(&fscache_n_cop_allocate_page);
734
735 error:
736         if (ret == -ERESTARTSYS)
737                 fscache_stat(&fscache_n_allocs_intr);
738         else if (ret < 0)
739                 fscache_stat(&fscache_n_allocs_nobufs);
740         else
741                 fscache_stat(&fscache_n_allocs_ok);
742
743         fscache_put_retrieval(op);
744         _leave(" = %d", ret);
745         return ret;
746
747 nobufs_unlock_dec:
748         wake_cookie = __fscache_unuse_cookie(cookie);
749 nobufs_unlock:
750         spin_unlock(&cookie->lock);
751         fscache_put_retrieval(op);
752         if (wake_cookie)
753                 __fscache_wake_unused_cookie(cookie);
754 nobufs:
755         fscache_stat(&fscache_n_allocs_nobufs);
756         _leave(" = -ENOBUFS");
757         return -ENOBUFS;
758 }
759 EXPORT_SYMBOL(__fscache_alloc_page);
760
761 /*
762  * Unmark pages allocated in the readahead code path (via
763  * fscache_read_or_alloc_pages()) after delegating to the base filesystem
764  */
765 void __fscache_readpages_cancel(struct fscache_cookie *cookie,
766                                 struct list_head *pages)
767 {
768         struct page *page;
769
770         list_for_each_entry(page, pages, lru) {
771                 if (PageFsCache(page))
772                         __fscache_uncache_page(cookie, page);
773         }
774 }
775 EXPORT_SYMBOL(__fscache_readpages_cancel);
776
777 /*
778  * release a write op reference
779  */
780 static void fscache_release_write_op(struct fscache_operation *_op)
781 {
782         _enter("{OP%x}", _op->debug_id);
783 }
784
785 /*
786  * perform the background storage of a page into the cache
787  */
788 static void fscache_write_op(struct fscache_operation *_op)
789 {
790         struct fscache_storage *op =
791                 container_of(_op, struct fscache_storage, op);
792         struct fscache_object *object = op->op.object;
793         struct fscache_cookie *cookie;
794         struct page *page;
795         unsigned n;
796         void *results[1];
797         int ret;
798
799         _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));
800
801 again:
802         spin_lock(&object->lock);
803         cookie = object->cookie;
804
805         if (!fscache_object_is_active(object)) {
806                 /* If we get here, then the on-disk cache object likely no
807                  * longer exists, so we should just cancel this write
808                  * operation.
809                  */
810                 spin_unlock(&object->lock);
811                 fscache_op_complete(&op->op, true);
812                 _leave(" [inactive]");
813                 return;
814         }
815
816         if (!cookie) {
817                 /* If we get here, then the cookie belonging to the object was
818                  * detached, probably by the cookie being withdrawn due to
819                  * memory pressure, which means that the pages we might write
820                  * to the cache from no longer exist - therefore, we can just
821                  * cancel this write operation.
822                  */
823                 spin_unlock(&object->lock);
824                 fscache_op_complete(&op->op, true);
825                 _leave(" [cancel] op{f=%lx s=%u} obj{s=%s f=%lx}",
826                        _op->flags, _op->state, object->state->short_name,
827                        object->flags);
828                 return;
829         }
830
831         spin_lock(&cookie->stores_lock);
832
833         fscache_stat(&fscache_n_store_calls);
834
835         /* find a page to store */
836         results[0] = NULL;
837         page = NULL;
838         n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
839                                        FSCACHE_COOKIE_PENDING_TAG);
840         trace_fscache_gang_lookup(cookie, &op->op, results, n, op->store_limit);
841         if (n != 1)
842                 goto superseded;
843         page = results[0];
844         _debug("gang %d [%lx]", n, page->index);
845
846         radix_tree_tag_set(&cookie->stores, page->index,
847                            FSCACHE_COOKIE_STORING_TAG);
848         radix_tree_tag_clear(&cookie->stores, page->index,
849                              FSCACHE_COOKIE_PENDING_TAG);
850         trace_fscache_page(cookie, page, fscache_page_radix_pend2store);
851
852         spin_unlock(&cookie->stores_lock);
853         spin_unlock(&object->lock);
854
855         if (page->index >= op->store_limit)
856                 goto discard_page;
857
858         fscache_stat(&fscache_n_store_pages);
859         fscache_stat(&fscache_n_cop_write_page);
860         ret = object->cache->ops->write_page(op, page);
861         fscache_stat_d(&fscache_n_cop_write_page);
862         trace_fscache_wrote_page(cookie, page, &op->op, ret);
863         fscache_end_page_write(object, page);
864         if (ret < 0) {
865                 fscache_abort_object(object);
866                 fscache_op_complete(&op->op, true);
867         } else {
868                 fscache_enqueue_operation(&op->op);
869         }
870
871         _leave("");
872         return;
873
874 discard_page:
875         fscache_stat(&fscache_n_store_pages_over_limit);
876         trace_fscache_wrote_page(cookie, page, &op->op, -ENOBUFS);
877         fscache_end_page_write(object, page);
878         goto again;
879
880 superseded:
881         /* this writer is going away and there aren't any more things to
882          * write */
883         _debug("cease");
884         spin_unlock(&cookie->stores_lock);
885         clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
886         spin_unlock(&object->lock);
887         fscache_op_complete(&op->op, false);
888         _leave("");
889 }
890
891 /*
892  * Clear the pages pending writing for invalidation
893  */
894 void fscache_invalidate_writes(struct fscache_cookie *cookie)
895 {
896         struct page *page;
897         void *results[16];
898         int n, i;
899
900         _enter("");
901
902         for (;;) {
903                 spin_lock(&cookie->stores_lock);
904                 n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
905                                                ARRAY_SIZE(results),
906                                                FSCACHE_COOKIE_PENDING_TAG);
907                 if (n == 0) {
908                         spin_unlock(&cookie->stores_lock);
909                         break;
910                 }
911
912                 for (i = n - 1; i >= 0; i--) {
913                         page = results[i];
914                         radix_tree_delete(&cookie->stores, page->index);
915                         trace_fscache_page(cookie, page, fscache_page_radix_delete);
916                         trace_fscache_page(cookie, page, fscache_page_inval);
917                 }
918
919                 spin_unlock(&cookie->stores_lock);
920
921                 for (i = n - 1; i >= 0; i--)
922                         put_page(results[i]);
923         }
924
925         wake_up_bit(&cookie->flags, 0);
926         trace_fscache_wake_cookie(cookie);
927
928         _leave("");
929 }
930
931 /*
932  * request a page be stored in the cache
933  * - returns:
934  *   -ENOMEM    - out of memory, nothing done
935  *   -ENOBUFS   - no backing object available in which to cache the page
936  *   0          - dispatched a write - it'll call end_io_func() when finished
937  *
938  * if the cookie still has a backing object at this point, that object can be
939  * in one of a few states with respect to storage processing:
940  *
941  *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
942  *      set)
943  *
944  *      (a) no writes yet
945  *
946  *      (b) writes deferred till post-creation (mark page for writing and
947  *          return immediately)
948  *
949  *  (2) negative lookup, object created, initial fill being made from netfs
950  *
951  *      (a) fill point not yet reached this page (mark page for writing and
952  *          return)
953  *
954  *      (b) fill point passed this page (queue op to store this page)
955  *
956  *  (3) object extant (queue op to store this page)
957  *
958  * any other state is invalid
959  */
960 int __fscache_write_page(struct fscache_cookie *cookie,
961                          struct page *page,
962                          loff_t object_size,
963                          gfp_t gfp)
964 {
965         struct fscache_storage *op;
966         struct fscache_object *object;
967         bool wake_cookie = false;
968         int ret;
969
970         _enter("%p,%x,", cookie, (u32) page->flags);
971
972         ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
973         ASSERT(PageFsCache(page));
974
975         fscache_stat(&fscache_n_stores);
976
977         if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
978                 _leave(" = -ENOBUFS [invalidating]");
979                 return -ENOBUFS;
980         }
981
982         op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
983         if (!op)
984                 goto nomem;
985
986         fscache_operation_init(cookie, &op->op, fscache_write_op, NULL,
987                                fscache_release_write_op);
988         op->op.flags = FSCACHE_OP_ASYNC |
989                 (1 << FSCACHE_OP_WAITING) |
990                 (1 << FSCACHE_OP_UNUSE_COOKIE);
991
992         ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
993         if (ret < 0)
994                 goto nomem_free;
995
996         trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_write_one);
997
998         ret = -ENOBUFS;
999         spin_lock(&cookie->lock);
1000
1001         if (!fscache_cookie_enabled(cookie) ||
1002             hlist_empty(&cookie->backing_objects))
1003                 goto nobufs;
1004         object = hlist_entry(cookie->backing_objects.first,
1005                              struct fscache_object, cookie_link);
1006         if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
1007                 goto nobufs;
1008
1009         trace_fscache_page(cookie, page, fscache_page_write);
1010
1011         /* add the page to the pending-storage radix tree on the backing
1012          * object */
1013         spin_lock(&object->lock);
1014
1015         if (object->store_limit_l != object_size)
1016                 fscache_set_store_limit(object, object_size);
1017
1018         spin_lock(&cookie->stores_lock);
1019
1020         _debug("store limit %llx", (unsigned long long) object->store_limit);
1021
1022         ret = radix_tree_insert(&cookie->stores, page->index, page);
1023         if (ret < 0) {
1024                 if (ret == -EEXIST)
1025                         goto already_queued;
1026                 _debug("insert failed %d", ret);
1027                 goto nobufs_unlock_obj;
1028         }
1029
1030         trace_fscache_page(cookie, page, fscache_page_radix_insert);
1031         radix_tree_tag_set(&cookie->stores, page->index,
1032                            FSCACHE_COOKIE_PENDING_TAG);
1033         trace_fscache_page(cookie, page, fscache_page_radix_set_pend);
1034         get_page(page);
1035
1036         /* we only want one writer at a time, but we do need to queue new
1037          * writers after exclusive ops */
1038         if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
1039                 goto already_pending;
1040
1041         spin_unlock(&cookie->stores_lock);
1042         spin_unlock(&object->lock);
1043
1044         op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
1045         op->store_limit = object->store_limit;
1046
1047         __fscache_use_cookie(cookie);
1048         if (fscache_submit_op(object, &op->op) < 0)
1049                 goto submit_failed;
1050
1051         spin_unlock(&cookie->lock);
1052         radix_tree_preload_end();
1053         fscache_stat(&fscache_n_store_ops);
1054         fscache_stat(&fscache_n_stores_ok);
1055
1056         /* the work queue now carries its own ref on the object */
1057         fscache_put_operation(&op->op);
1058         _leave(" = 0");
1059         return 0;
1060
1061 already_queued:
1062         fscache_stat(&fscache_n_stores_again);
1063 already_pending:
1064         spin_unlock(&cookie->stores_lock);
1065         spin_unlock(&object->lock);
1066         spin_unlock(&cookie->lock);
1067         radix_tree_preload_end();
1068         fscache_put_operation(&op->op);
1069         fscache_stat(&fscache_n_stores_ok);
1070         _leave(" = 0");
1071         return 0;
1072
1073 submit_failed:
1074         spin_lock(&cookie->stores_lock);
1075         radix_tree_delete(&cookie->stores, page->index);
1076         trace_fscache_page(cookie, page, fscache_page_radix_delete);
1077         spin_unlock(&cookie->stores_lock);
1078         wake_cookie = __fscache_unuse_cookie(cookie);
1079         put_page(page);
1080         ret = -ENOBUFS;
1081         goto nobufs;
1082
1083 nobufs_unlock_obj:
1084         spin_unlock(&cookie->stores_lock);
1085         spin_unlock(&object->lock);
1086 nobufs:
1087         spin_unlock(&cookie->lock);
1088         radix_tree_preload_end();
1089         fscache_put_operation(&op->op);
1090         if (wake_cookie)
1091                 __fscache_wake_unused_cookie(cookie);
1092         fscache_stat(&fscache_n_stores_nobufs);
1093         _leave(" = -ENOBUFS");
1094         return -ENOBUFS;
1095
1096 nomem_free:
1097         fscache_put_operation(&op->op);
1098 nomem:
1099         fscache_stat(&fscache_n_stores_oom);
1100         _leave(" = -ENOMEM");
1101         return -ENOMEM;
1102 }
1103 EXPORT_SYMBOL(__fscache_write_page);
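
/*
 * Usage sketch: a netfs copying a page it has just read from the server
 * into the cache, assuming the fscache_write_page() and
 * fscache_uncache_page() wrappers from linux/fscache.h.  "myfs" names are
 * hypothetical.
 */
#if 0
static void myfs_readpage_to_fscache(struct inode *inode, struct page *page)
{
        struct fscache_cookie *cookie = myfs_i(inode)->fscache;
        int ret;

        ret = fscache_write_page(cookie, page, i_size_read(inode), GFP_KERNEL);
        if (ret != 0)
                /* a refused or failed store must be uncached so that the
                 * page loses its PG_fscache mark */
                fscache_uncache_page(cookie, page);
}
#endif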
1104
1105 /*
1106  * remove a page from the cache
1107  */
1108 void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
1109 {
1110         struct fscache_object *object;
1111
1112         _enter(",%p", page);
1113
1114         ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
1115         ASSERTCMP(page, !=, NULL);
1116
1117         fscache_stat(&fscache_n_uncaches);
1118
1119         /* cache withdrawal may beat us to it */
1120         if (!PageFsCache(page))
1121                 goto done;
1122
1123         trace_fscache_page(cookie, page, fscache_page_uncache);
1124
1125         /* get the object */
1126         spin_lock(&cookie->lock);
1127
1128         if (hlist_empty(&cookie->backing_objects)) {
1129                 ClearPageFsCache(page);
1130                 goto done_unlock;
1131         }
1132
1133         object = hlist_entry(cookie->backing_objects.first,
1134                              struct fscache_object, cookie_link);
1135
1136         /* there might now be stuff on disk we could read */
1137         clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
1138
1139         /* only invoke the cache backend if we managed to mark the page
1140          * uncached here; this deals with synchronisation vs withdrawal */
1141         if (TestClearPageFsCache(page) &&
1142             object->cache->ops->uncache_page) {
1143                 /* the cache backend releases the cookie lock */
1144                 fscache_stat(&fscache_n_cop_uncache_page);
1145                 object->cache->ops->uncache_page(object, page);
1146                 fscache_stat_d(&fscache_n_cop_uncache_page);
1147                 goto done;
1148         }
1149
1150 done_unlock:
1151         spin_unlock(&cookie->lock);
1152 done:
1153         _leave("");
1154 }
1155 EXPORT_SYMBOL(__fscache_uncache_page);
1156
1157 /**
1158  * fscache_mark_page_cached - Mark a page as being cached
1159  * @op: The retrieval op pages are being marked for
1160  * @page: The page to be marked
1161  *
1162  * Mark a netfs page as being cached.  After this is called, the netfs
1163  * must call fscache_uncache_page() to remove the mark.
1164  */
1165 void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
1166 {
1167         struct fscache_cookie *cookie = op->op.object->cookie;
1168
1169 #ifdef CONFIG_FSCACHE_STATS
1170         atomic_inc(&fscache_n_marks);
1171 #endif
1172
1173         trace_fscache_page(cookie, page, fscache_page_cached);
1174
1175         _debug("- mark %p{%lx}", page, page->index);
1176         if (TestSetPageFsCache(page)) {
1177                 static bool once_only;
1178                 if (!once_only) {
1179                         once_only = true;
1180                         pr_warn("Cookie type %s marked page %lx multiple times\n",
1181                                 cookie->def->name, page->index);
1182                 }
1183         }
1184
1185         if (cookie->def->mark_page_cached)
1186                 cookie->def->mark_page_cached(cookie->netfs_data,
1187                                               op->mapping, page);
1188 }
1189 EXPORT_SYMBOL(fscache_mark_page_cached);
1190
1191 /**
1192  * fscache_mark_pages_cached - Mark pages as being cached
1193  * @op: The retrieval op pages are being marked for
1194  * @pagevec: The pages to be marked
1195  *
1196  * Mark a bunch of netfs pages as being cached.  After this is called,
1197  * the netfs must call fscache_uncache_page() to remove the mark.
1198  */
1199 void fscache_mark_pages_cached(struct fscache_retrieval *op,
1200                                struct pagevec *pagevec)
1201 {
1202         unsigned long loop;
1203
1204         for (loop = 0; loop < pagevec->nr; loop++)
1205                 fscache_mark_page_cached(op, pagevec->pages[loop]);
1206
1207         pagevec_reinit(pagevec);
1208 }
1209 EXPORT_SYMBOL(fscache_mark_pages_cached);
1210
1211 /*
1212  * Uncache all the pages in an inode that are marked PG_fscache, assuming them
1213  * to be associated with the given cookie.
1214  */
1215 void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
1216                                        struct inode *inode)
1217 {
1218         struct address_space *mapping = inode->i_mapping;
1219         struct pagevec pvec;
1220         pgoff_t next;
1221         int i;
1222
1223         _enter("%p,%p", cookie, inode);
1224
1225         if (!mapping || mapping->nrpages == 0) {
1226                 _leave(" [no pages]");
1227                 return;
1228         }
1229
1230         pagevec_init(&pvec);
1231         next = 0;
1232         do {
1233                 if (!pagevec_lookup(&pvec, mapping, &next))
1234                         break;
1235                 for (i = 0; i < pagevec_count(&pvec); i++) {
1236                         struct page *page = pvec.pages[i];
1237                         if (PageFsCache(page)) {
1238                                 __fscache_wait_on_page_write(cookie, page);
1239                                 __fscache_uncache_page(cookie, page);
1240                         }
1241                 }
1242                 pagevec_release(&pvec);
1243                 cond_resched();
1244         } while (next);
1245
1246         _leave("");
1247 }
1248 EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);
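
/*
 * Usage sketch: a netfs stripping the PG_fscache marks from an inode's
 * remaining pages before its cookie goes away, assuming the
 * fscache_uncache_all_inode_pages() wrapper from linux/fscache.h.  The
 * myfs_i() accessor is hypothetical.
 */
#if 0
static void myfs_release_inode_cookie(struct inode *inode)
{
        struct fscache_cookie *cookie = myfs_i(inode)->fscache;

        /* waits for outstanding stores, then uncaches each marked page;
         * done before fscache_relinquish_cookie() is called */
        if (cookie)
                fscache_uncache_all_inode_pages(cookie, inode);
}
#endif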