// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"
#include "nfstrace.h"

#define NFSDBG_FACILITY         NFSDBG_PAGECACHE

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;

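/*
 * Read headers come from the nfs_rdata_cachep slab cache; they are
 * zeroed on allocation, so only rw_mode needs to be set here.
 */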
static struct nfs_pgio_header *nfs_readhdr_alloc(void)
{
        struct nfs_pgio_header *p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);

        if (p)
                p->rw_mode = FMODE_READ;
        return p;
}

static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
        kmem_cache_free(nfs_rdata_cachep, rhdr);
}

static
int nfs_return_empty_page(struct page *page)
{
        zero_user(page, 0, PAGE_SIZE);
        SetPageUptodate(page);
        unlock_page(page);
        return 0;
}

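/*
 * Initialize a pageio descriptor for reads. When a pNFS layout driver
 * is active and the caller has not forced I/O through the MDS, the
 * driver's pg_read_ops are used; otherwise the generic rw ops apply.
 */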
void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
                              struct inode *inode, bool force_mds,
                              const struct nfs_pgio_completion_ops *compl_ops)
{
        struct nfs_server *server = NFS_SERVER(inode);
        const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
        if (server->pnfs_curr_ld && !force_mds)
                pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
        nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
                        server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);

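/*
 * Finish off the descriptor: submit anything still queued, then account
 * the bytes and pages that were sent against the inode's read stats.
 */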
static void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio,
                                     struct inode *inode)
{
        struct nfs_pgio_mirror *pgm;
        unsigned long npages;

        nfs_pageio_complete(pgio);

        /* It doesn't make sense to do mirrored reads! */
        WARN_ON_ONCE(pgio->pg_mirror_count != 1);

        pgm = &pgio->pg_mirrors[0];
        NFS_I(inode)->read_io += pgm->pg_bytes_written;
        npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> PAGE_SHIFT;
        nfs_add_stats(inode, NFSIOS_READPAGES, npages);
}


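/*
 * Redirect an existing descriptor back at the MDS: undo any layout
 * driver setup and restore the server's rsize as the mirror block size.
 */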
void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
        struct nfs_pgio_mirror *mirror;

        if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
                pgio->pg_ops->pg_cleanup(pgio);

        pgio->pg_ops = &nfs_pgio_rw_ops;

        /* read path should never have more than one mirror */
        WARN_ON_ONCE(pgio->pg_mirror_count != 1);

        mirror = &pgio->pg_mirrors[0];
        mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

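/*
 * Release a read request. Once every request in the page group has
 * completed, the page is unlocked: an uptodate page is written into
 * fscache, while a page that is not uptodate (and carries no error or
 * private flag) is dropped from the page cache.
 */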
static void nfs_readpage_release(struct nfs_page *req, int error)
{
        struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
        struct page *page = req->wb_page;

        dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
                (unsigned long long)NFS_FILEID(inode), req->wb_bytes,
                (long long)req_offset(req));

        if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT)
                SetPageError(page);
        if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
                struct address_space *mapping = page_file_mapping(page);

                if (PageUptodate(page))
                        nfs_readpage_to_fscache(inode, page, 0);
                else if (!PageError(page) && !PagePrivate(page))
                        generic_error_remove_page(mapping, page);
                unlock_page(page);
        }
        nfs_release_request(req);
}

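/* State shared between nfs_readpage()/nfs_readpages() and the filler. */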
struct nfs_readdesc {
        struct nfs_pageio_descriptor pgio;
        struct nfs_open_context *ctx;
};

static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
        if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
                SetPageUptodate(req->wb_page);
}

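/*
 * Completion callback for a read header: walk the attached requests,
 * zero any range past the good byte count when EOF was hit, mark the
 * successful page groups uptodate and release every request.
 */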
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
        unsigned long bytes = 0;
        int error;

        if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
                goto out;
        while (!list_empty(&hdr->pages)) {
                struct nfs_page *req = nfs_list_entry(hdr->pages.next);
                struct page *page = req->wb_page;
                unsigned long start = req->wb_pgbase;
                unsigned long end = req->wb_pgbase + req->wb_bytes;

                if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
                        /* note: regions of the page not covered by a
                         * request are zeroed in readpage_async_filler */
                        if (bytes > hdr->good_bytes) {
                                /* nothing in this request was good, so zero
                                 * the full extent of the request */
                                zero_user_segment(page, start, end);

                        } else if (hdr->good_bytes - bytes < req->wb_bytes) {
                                /* part of this request has good bytes, but
                                 * not all. zero the bad bytes */
                                start += hdr->good_bytes - bytes;
                                WARN_ON(start < req->wb_pgbase);
                                zero_user_segment(page, start, end);
                        }
                }
                error = 0;
                bytes += req->wb_bytes;
                if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
                        if (bytes <= hdr->good_bytes)
                                nfs_page_group_set_uptodate(req);
                        else {
                                error = hdr->error;
                                xchg(&nfs_req_openctx(req)->error, error);
                        }
                } else
                        nfs_page_group_set_uptodate(req);
                nfs_list_remove_request(req);
                nfs_readpage_release(req, error);
        }
out:
        hdr->release(hdr);
}

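/*
 * Set up the read RPC: propagate swap-file flags into the task setup
 * and let the protocol-specific read_setup() fill in the message.
 */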
static void nfs_initiate_read(struct nfs_pgio_header *hdr,
                              struct rpc_message *msg,
                              const struct nfs_rpc_ops *rpc_ops,
                              struct rpc_task_setup *task_setup_data, int how)
{
        struct inode *inode = hdr->inode;
        int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;

        task_setup_data->flags |= swap_flags;
        rpc_ops->read_setup(hdr, msg);
        trace_nfs_initiate_read(hdr);
}

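/* Error cleanup: release every request still queued with the given error. */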
static void
nfs_async_read_error(struct list_head *head, int error)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_readpage_release(req, error);
        }
}

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
        .error_cleanup = nfs_async_read_error,
        .completion = nfs_read_completion,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task,
                             struct nfs_pgio_header *hdr,
                             struct inode *inode)
{
        int status = NFS_PROTO(inode)->read_done(task, hdr);
        if (status != 0)
                return status;

        nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);
        trace_nfs_readpage_done(task, hdr);

        if (task->tk_status == -ESTALE) {
                nfs_set_inode_stale(inode);
                nfs_mark_for_revalidate(inode);
        }
        return 0;
}

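/*
 * Handle a short read. No progress at all becomes -EIO; a non-RPC
 * layout driver falls back through the MDS; otherwise the arguments
 * are advanced past the bytes already received and the call restarted.
 */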
static void nfs_readpage_retry(struct rpc_task *task,
                               struct nfs_pgio_header *hdr)
{
        struct nfs_pgio_args *argp = &hdr->args;
        struct nfs_pgio_res  *resp = &hdr->res;

        /* This is a short read! */
        nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
        trace_nfs_readpage_short(task, hdr);

        /* Has the server at least made some progress? */
        if (resp->count == 0) {
                nfs_set_pgio_error(hdr, -EIO, argp->offset);
                return;
        }

        /* For non-RPC-based layout drivers, retry through the MDS */
        if (!task->tk_ops) {
                hdr->pnfs_error = -EAGAIN;
                return;
        }

        /* Yes, so retry the read at the end of the hdr */
        hdr->mds_offset += resp->count;
        argp->offset += resp->count;
        argp->pgbase += resp->count;
        argp->count -= resp->count;
        resp->count = 0;
        resp->eof = 0;
        rpc_restart_call_prepare(task);
}

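/*
 * Post-process a read reply: clamp good_bytes at EOF, or retry when the
 * server returned fewer bytes than requested without reporting EOF.
 */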
static void nfs_readpage_result(struct rpc_task *task,
                                struct nfs_pgio_header *hdr)
{
        if (hdr->res.eof) {
                loff_t pos = hdr->args.offset + hdr->res.count;
                unsigned int new = pos - hdr->io_start;

                if (hdr->good_bytes > new) {
                        hdr->good_bytes = new;
                        set_bit(NFS_IOHDR_EOF, &hdr->flags);
                        clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
                }
        } else if (hdr->res.count < hdr->args.count)
                nfs_readpage_retry(task, hdr);
}

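/*
 * Build a read request for one page and add it to the descriptor. The
 * tail of a partially covered page is zeroed here, and a page entirely
 * beyond EOF is just zeroed and marked uptodate.
 */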
static int
readpage_async_filler(void *data, struct page *page)
{
        struct nfs_readdesc *desc = data;
        struct nfs_page *new;
        unsigned int len;
        int error;

        len = nfs_page_length(page);
        if (len == 0)
                return nfs_return_empty_page(page);

        new = nfs_create_request(desc->ctx, page, 0, len);
        if (IS_ERR(new))
                goto out_error;

        if (len < PAGE_SIZE)
                zero_user_segment(page, len, PAGE_SIZE);
        if (!nfs_pageio_add_request(&desc->pgio, new)) {
                nfs_list_remove_request(new);
                error = desc->pgio.pg_error;
                nfs_readpage_release(new, error);
                goto out;
        }
        return 0;
out_error:
        error = PTR_ERR(new);
        unlock_page(page);
out:
        return error;
}

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -   The error flag is set for this page. This happens only when a
 *      previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
        struct nfs_readdesc desc;
        struct inode *inode = page_file_mapping(page)->host;
        int ret;

        dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
                page, PAGE_SIZE, page_index(page));
        nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);

        /*
         * Try to flush any pending writes to the file.
         *
         * NOTE! Because we own the page lock, there cannot
         * be any new pending writes generated at this point
         * for this page (other pages can be written to).
         */
        ret = nfs_wb_page(inode, page);
        if (ret)
                goto out_unlock;
        if (PageUptodate(page))
                goto out_unlock;

        ret = -ESTALE;
        if (NFS_STALE(inode))
                goto out_unlock;

        if (file == NULL) {
                ret = -EBADF;
                desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
                if (desc.ctx == NULL)
                        goto out_unlock;
        } else
                desc.ctx = get_nfs_open_context(nfs_file_open_context(file));

        if (!IS_SYNC(inode)) {
                ret = nfs_readpage_from_fscache(desc.ctx, inode, page);
                if (ret == 0)
                        goto out;
        }

        xchg(&desc.ctx->error, 0);
        nfs_pageio_init_read(&desc.pgio, inode, false,
                             &nfs_async_read_completion_ops);

        ret = readpage_async_filler(&desc, page);

        if (!ret)
                nfs_pageio_complete_read(&desc.pgio, inode);

        ret = desc.pgio.pg_error < 0 ? desc.pgio.pg_error : 0;
        if (!ret) {
                ret = wait_on_page_locked_killable(page);
                if (!PageUptodate(page) && !ret)
                        ret = xchg(&desc.ctx->error, 0);
        }
out:
        put_nfs_open_context(desc.ctx);
        return ret;
out_unlock:
        unlock_page(page);
        return ret;
}

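/*
 * ->readpages() entry point: satisfy what we can from fscache, then
 * feed the remaining pages through readpage_async_filler() and submit
 * them in one batch.
 */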
int nfs_readpages(struct file *file, struct address_space *mapping,
                struct list_head *pages, unsigned nr_pages)
{
        struct nfs_readdesc desc;
        struct inode *inode = mapping->host;
        int ret;

        dprintk("NFS: nfs_readpages (%s/%Lu %d)\n",
                        inode->i_sb->s_id,
                        (unsigned long long)NFS_FILEID(inode),
                        nr_pages);
        nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

        ret = -ESTALE;
        if (NFS_STALE(inode))
                goto out;

        if (file == NULL) {
                ret = -EBADF;
                desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
                if (desc.ctx == NULL)
                        goto out;
        } else
                desc.ctx = get_nfs_open_context(nfs_file_open_context(file));

        /* attempt to read as many of the pages as possible from the cache
         * - this returns -ENOBUFS immediately if the cookie is negative
         */
        ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
                                         pages, &nr_pages);
        if (ret == 0)
                goto read_complete; /* all pages were read */

        nfs_pageio_init_read(&desc.pgio, inode, false,
                             &nfs_async_read_completion_ops);

        ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);

        nfs_pageio_complete_read(&desc.pgio, inode);

read_complete:
        put_nfs_open_context(desc.ctx);
out:
        return ret;
}

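/* Set up and tear down the slab cache backing read pageio headers. */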
int __init nfs_init_readpagecache(void)
{
        nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
                                             sizeof(struct nfs_pgio_header),
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL);
        if (nfs_rdata_cachep == NULL)
                return -ENOMEM;

        return 0;
}

void nfs_destroy_readpagecache(void)
{
        kmem_cache_destroy(nfs_rdata_cachep);
}

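/* Read-side hooks plugged into the generic NFS pageio machinery. */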
static const struct nfs_rw_ops nfs_rw_read_ops = {
        .rw_alloc_header        = nfs_readhdr_alloc,
        .rw_free_header         = nfs_readhdr_free,
        .rw_done                = nfs_readpage_done,
        .rw_result              = nfs_readpage_result,
        .rw_initiate            = nfs_initiate_read,
};