// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"
#include "nfstrace.h"

#define NFSDBG_FACILITY         NFSDBG_PAGECACHE

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;

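/*
 * Read headers are carved from a dedicated slab cache; the zeroing
 * allocation leaves every field clear except the read-mode marker
 * set below.
 */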
static struct nfs_pgio_header *nfs_readhdr_alloc(void)
{
        struct nfs_pgio_header *p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);

        if (p)
                p->rw_mode = FMODE_READ;
        return p;
}

static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
        kmem_cache_free(nfs_rdata_cachep, rhdr);
}

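/*
 * A page that lies entirely beyond the end of the file needs no RPC
 * at all: zero-fill it, mark it up to date and unlock it.
 */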
static int nfs_return_empty_page(struct page *page)
{
        zero_user(page, 0, PAGE_SIZE);
        SetPageUptodate(page);
        unlock_page(page);
        return 0;
}

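/*
 * Initialise a pageio descriptor for reads.  When a pNFS layout
 * driver is attached (and the caller has not forced I/O through the
 * metadata server), the driver's pg_read_ops drive the descriptor;
 * otherwise the generic RPC read path is used.
 */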
void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
                          struct inode *inode, bool force_mds,
                          const struct nfs_pgio_completion_ops *compl_ops)
{
        struct nfs_server *server = NFS_SERVER(inode);
        const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
        if (server->pnfs_curr_ld && !force_mds)
                pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
        nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
                        server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);

void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
        struct nfs_pgio_mirror *mirror;

        if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
                pgio->pg_ops->pg_cleanup(pgio);

        pgio->pg_ops = &nfs_pgio_rw_ops;

        /* read path should never have more than one mirror */
        WARN_ON_ONCE(pgio->pg_mirror_count != 1);

        mirror = &pgio->pg_mirrors[0];
        mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

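/*
 * Release a completed read request.  The last subrequest in the page
 * group to finish pushes an up-to-date page into fscache, evicts a
 * page that produced neither data nor a recorded error, and drops the
 * page lock.
 */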
static void nfs_readpage_release(struct nfs_page *req, int error)
{
        struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
        struct page *page = req->wb_page;

        dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
                (unsigned long long)NFS_FILEID(inode), req->wb_bytes,
                (long long)req_offset(req));

        if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT)
                SetPageError(page);
        if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
                struct address_space *mapping = page_file_mapping(page);

                if (PageUptodate(page))
                        nfs_readpage_to_fscache(inode, page, 0);
                else if (!PageError(page) && !PagePrivate(page))
                        generic_error_remove_page(mapping, page);
                unlock_page(page);
        }
        nfs_release_request(req);
}

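/*
 * Kick off an asynchronous read of a single locked page.  On success
 * the page is unlocked from the completion path; on failure it is
 * unlocked here before the error is returned.
 */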
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
                       struct page *page)
{
        struct nfs_page *new;
        unsigned int len;
        struct nfs_pageio_descriptor pgio;
        struct nfs_pgio_mirror *pgm;

        len = nfs_page_length(page);
        if (len == 0)
                return nfs_return_empty_page(page);
        new = nfs_create_request(ctx, page, 0, len);
        if (IS_ERR(new)) {
                unlock_page(page);
                return PTR_ERR(new);
        }
        if (len < PAGE_SIZE)
                zero_user_segment(page, len, PAGE_SIZE);

        nfs_pageio_init_read(&pgio, inode, false,
                             &nfs_async_read_completion_ops);
        if (!nfs_pageio_add_request(&pgio, new)) {
                nfs_list_remove_request(new);
                nfs_readpage_release(new, pgio.pg_error);
        }
        nfs_pageio_complete(&pgio);

        /* It doesn't make sense to do mirrored reads! */
        WARN_ON_ONCE(pgio.pg_mirror_count != 1);

        pgm = &pgio.pg_mirrors[0];
        NFS_I(inode)->read_io += pgm->pg_bytes_written;

        return pgio.pg_error < 0 ? pgio.pg_error : 0;
}

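/*
 * The page becomes up to date only once every subrequest in its page
 * group has completed successfully.
 */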
static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
        if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
                SetPageUptodate(req->wb_page);
}

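/*
 * Per-header completion: walk the requests attached to the header,
 * zero-filling whatever lies beyond the last good byte when the
 * server hit EOF, marking the good ranges up to date and recording
 * any error in the open context for the rest.
 */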
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
        unsigned long bytes = 0;
        int error;

        if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
                goto out;
        while (!list_empty(&hdr->pages)) {
                struct nfs_page *req = nfs_list_entry(hdr->pages.next);
                struct page *page = req->wb_page;
                unsigned long start = req->wb_pgbase;
                unsigned long end = req->wb_pgbase + req->wb_bytes;

                if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
                        /* note: regions of the page not covered by a
                         * request are zeroed in nfs_readpage_async /
                         * readpage_async_filler */
                        if (bytes > hdr->good_bytes) {
                                /* nothing in this request was good, so zero
                                 * the full extent of the request */
                                zero_user_segment(page, start, end);

                        } else if (hdr->good_bytes - bytes < req->wb_bytes) {
                                /* part of this request has good bytes, but
                                 * not all. zero the bad bytes */
                                start += hdr->good_bytes - bytes;
                                WARN_ON(start < req->wb_pgbase);
                                zero_user_segment(page, start, end);
                        }
                }
                error = 0;
                bytes += req->wb_bytes;
                if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
                        if (bytes <= hdr->good_bytes)
                                nfs_page_group_set_uptodate(req);
                        else {
                                error = hdr->error;
                                xchg(&nfs_req_openctx(req)->error, error);
                        }
                } else
                        nfs_page_group_set_uptodate(req);
                nfs_list_remove_request(req);
                nfs_readpage_release(req, error);
        }
out:
        hdr->release(hdr);
}

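/*
 * Prepare the RPC message for a read.  Reads backing a swapfile are
 * flagged so the RPC layer treats them as swap I/O.
 */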
static void nfs_initiate_read(struct nfs_pgio_header *hdr,
                              struct rpc_message *msg,
                              const struct nfs_rpc_ops *rpc_ops,
                              struct rpc_task_setup *task_setup_data, int how)
{
        struct inode *inode = hdr->inode;
        int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;

        task_setup_data->flags |= swap_flags;
        rpc_ops->read_setup(hdr, msg);
        trace_nfs_initiate_read(inode, hdr->io_start, hdr->good_bytes);
}

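/*
 * Error cleanup: fail every request still queued on @head with the
 * given error.
 */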
static void
nfs_async_read_error(struct list_head *head, int error)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_readpage_release(req, error);
        }
}

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
        .error_cleanup = nfs_async_read_error,
        .completion = nfs_read_completion,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task,
                             struct nfs_pgio_header *hdr,
                             struct inode *inode)
{
        int status = NFS_PROTO(inode)->read_done(task, hdr);
        if (status != 0)
                return status;

        nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);
        trace_nfs_readpage_done(inode, task->tk_status,
                                hdr->args.offset, hdr->res.eof);

        if (task->tk_status == -ESTALE) {
                set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
                nfs_mark_for_revalidate(inode);
        }
        return 0;
}

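/*
 * Handle a short read: if the server made no progress at all, fail
 * the header with -EIO; otherwise slide the request window past the
 * bytes already returned and reissue the RPC.
 */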
static void nfs_readpage_retry(struct rpc_task *task,
                               struct nfs_pgio_header *hdr)
{
        struct nfs_pgio_args *argp = &hdr->args;
        struct nfs_pgio_res  *resp = &hdr->res;

        /* This is a short read! */
        nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
        /* Has the server at least made some progress? */
        if (resp->count == 0) {
                nfs_set_pgio_error(hdr, -EIO, argp->offset);
                return;
        }

        /* For non-RPC-based layout drivers, retry through the MDS */
        if (!task->tk_ops) {
                hdr->pnfs_error = -EAGAIN;
                return;
        }

        /* Yes, it has: retry the remainder of the request */
        hdr->mds_offset += resp->count;
        argp->offset += resp->count;
        argp->pgbase += resp->count;
        argp->count -= resp->count;
        rpc_restart_call_prepare(task);
}

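/*
 * Post-process the reply: an EOF inside the request trims good_bytes
 * to what the server actually holds (and clears any error flag, since
 * a read that reaches EOF has succeeded); a short read without EOF is
 * retried.
 */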
static void nfs_readpage_result(struct rpc_task *task,
                                struct nfs_pgio_header *hdr)
{
        if (hdr->res.eof) {
                loff_t pos = hdr->args.offset + hdr->res.count;
                unsigned int new = pos - hdr->io_start;

                if (hdr->good_bytes > new) {
                        hdr->good_bytes = new;
                        set_bit(NFS_IOHDR_EOF, &hdr->flags);
                        clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
                }
        } else if (hdr->res.count < hdr->args.count)
                nfs_readpage_retry(task, hdr);
}

/*
 * Read a page over NFS.
 * The read is always issued asynchronously; we then wait for it to
 * complete and pick up any error recorded in the open context.
 */
int nfs_readpage(struct file *file, struct page *page)
{
        struct nfs_open_context *ctx;
        struct inode *inode = page_file_mapping(page)->host;
        int             error;

        dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
                page, PAGE_SIZE, page_index(page));
        nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
        nfs_add_stats(inode, NFSIOS_READPAGES, 1);

        /*
         * Try to flush any pending writes to the file.
         *
         * NOTE! Because we own the page lock, there cannot
         * be any new pending writes generated at this point
         * for this page (other pages can be written to).
         */
        error = nfs_wb_page(inode, page);
        if (error)
                goto out_unlock;
        if (PageUptodate(page))
                goto out_unlock;

        error = -ESTALE;
        if (NFS_STALE(inode))
                goto out_unlock;

        if (file == NULL) {
                error = -EBADF;
                ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
                if (ctx == NULL)
                        goto out_unlock;
        } else
                ctx = get_nfs_open_context(nfs_file_open_context(file));

        if (!IS_SYNC(inode)) {
                error = nfs_readpage_from_fscache(ctx, inode, page);
                if (error == 0)
                        goto out;
        }

        xchg(&ctx->error, 0);
        error = nfs_readpage_async(ctx, inode, page);
        if (!error) {
                error = wait_on_page_locked_killable(page);
                if (!PageUptodate(page) && !error)
                        error = xchg(&ctx->error, 0);
        }
out:
        put_nfs_open_context(ctx);
        return error;
out_unlock:
        unlock_page(page);
        return error;
}

struct nfs_readdesc {
        struct nfs_pageio_descriptor *pgio;
        struct nfs_open_context *ctx;
};

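/*
 * Filler callback for read_cache_pages(): wrap each page in an NFS
 * read request and feed it to the pageio descriptor, which coalesces
 * contiguous pages into larger READ RPCs.
 */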
static int
readpage_async_filler(void *data, struct page *page)
{
        struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
        struct nfs_page *new;
        unsigned int len;
        int error;

        len = nfs_page_length(page);
        if (len == 0)
                return nfs_return_empty_page(page);

        new = nfs_create_request(desc->ctx, page, 0, len);
        if (IS_ERR(new))
                goto out_error;

        if (len < PAGE_SIZE)
                zero_user_segment(page, len, PAGE_SIZE);
        if (!nfs_pageio_add_request(desc->pgio, new)) {
                nfs_list_remove_request(new);
                error = desc->pgio->pg_error;
                nfs_readpage_release(new, error);
                goto out;
        }
        return 0;
out_error:
        error = PTR_ERR(new);
        unlock_page(page);
out:
        return error;
}

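/*
 * Readahead entry point.  Pages already available from fscache are
 * satisfied first; the remainder are batched through the pageio
 * descriptor into as few READ RPCs as the rsize allows.
 */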
int nfs_readpages(struct file *filp, struct address_space *mapping,
                struct list_head *pages, unsigned nr_pages)
{
        struct nfs_pageio_descriptor pgio;
        struct nfs_pgio_mirror *pgm;
        struct nfs_readdesc desc = {
                .pgio = &pgio,
        };
        struct inode *inode = mapping->host;
        unsigned long npages;
        int ret = -ESTALE;

        dprintk("NFS: nfs_readpages (%s/%Lu %d)\n",
                        inode->i_sb->s_id,
                        (unsigned long long)NFS_FILEID(inode),
                        nr_pages);
        nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

        if (NFS_STALE(inode))
                goto out;

        if (filp == NULL) {
                desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
                if (desc.ctx == NULL)
                        return -EBADF;
        } else
                desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));

        /* attempt to read as many of the pages as possible from the cache
         * - this returns -ENOBUFS immediately if the cookie is negative
         */
        ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
                                         pages, &nr_pages);
        if (ret == 0)
                goto read_complete; /* all pages were read */

        nfs_pageio_init_read(&pgio, inode, false,
                             &nfs_async_read_completion_ops);

        ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
        nfs_pageio_complete(&pgio);

        /* It doesn't make sense to do mirrored reads! */
        WARN_ON_ONCE(pgio.pg_mirror_count != 1);

        pgm = &pgio.pg_mirrors[0];
        NFS_I(inode)->read_io += pgm->pg_bytes_written;
        npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> PAGE_SHIFT;
        nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
        put_nfs_open_context(desc.ctx);
out:
        return ret;
}

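/*
 * Called at NFS module load/unload to create and tear down the slab
 * cache backing read headers.
 */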
int __init nfs_init_readpagecache(void)
{
        nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
                                             sizeof(struct nfs_pgio_header),
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL);
        if (nfs_rdata_cachep == NULL)
                return -ENOMEM;

        return 0;
}

void nfs_destroy_readpagecache(void)
{
        kmem_cache_destroy(nfs_rdata_cachep);
}

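/*
 * Read-specific hooks plugged into the generic NFS page I/O engine.
 */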
static const struct nfs_rw_ops nfs_rw_read_ops = {
        .rw_alloc_header        = nfs_readhdr_alloc,
        .rw_free_header         = nfs_readhdr_free,
        .rw_done                = nfs_readpage_done,
        .rw_result              = nfs_readpage_result,
        .rw_initiate            = nfs_initiate_read,
};