// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"
#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;

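/*
 * Allocate a read header from the slab cache; rw_mode marks the
 * header as a read for the generic pageio code.
 */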
static struct nfs_pgio_header *nfs_readhdr_alloc(void)
{
	struct nfs_pgio_header *p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);

	if (p)
		p->rw_mode = FMODE_READ;
	return p;
}

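/* Return a read header to the slab cache. */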
static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
	kmem_cache_free(nfs_rdata_cachep, rhdr);
}

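/*
 * Zero-fill a folio that has nothing to read (it lies beyond the
 * end of file), mark it up to date and unlock it.
 */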
static int nfs_return_empty_folio(struct folio *folio)
{
	folio_zero_segment(folio, 0, folio_size(folio));
	folio_mark_uptodate(folio);
	folio_unlock(folio);
	return 0;
}

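/*
 * Initialize a pageio descriptor for reads. When a pNFS layout
 * driver is active and the caller has not forced I/O through the
 * MDS, use the layout driver's read ops.
 */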
void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
			      struct inode *inode, bool force_mds,
			      const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
			server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);

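/*
 * Submit any queued reads and account the bytes and pages that
 * were sent to the server.
 */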
static void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *pgm;
	unsigned long npages;

	nfs_pageio_complete(pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	pgm = &pgio->pg_mirrors[0];
	NFS_I(pgio->pg_inode)->read_io += pgm->pg_bytes_written;
	npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nfs_add_stats(pgio->pg_inode, NFSIOS_READPAGES, npages);
}

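/*
 * Switch the descriptor back to the standard read path (e.g. to
 * retry through the MDS after a pNFS error) and restore the MDS
 * rsize on the sole mirror.
 */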
void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	/* read path should never have more than one mirror */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

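/*
 * Release a completed read request: flag the folio on a fatal
 * server error, push fresh data into fscache, and unlock the folio
 * once the last request in the page group completes.
 */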
static void nfs_readpage_release(struct nfs_page *req, int error)
{
	struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
	struct folio *folio = nfs_page_to_folio(req);

	dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(inode), req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT)
		folio_set_error(folio);
	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
		if (folio_test_uptodate(folio))
			nfs_fscache_write_page(inode, &folio->page);
		folio_unlock(folio);
	}
	nfs_release_request(req);
}

struct nfs_readdesc {
	struct nfs_pageio_descriptor pgio;
	struct nfs_open_context *ctx;
};

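/* Mark the folio up to date once every request in its page group is. */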
static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
		folio_mark_uptodate(nfs_page_to_folio(req));
}

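/*
 * Completion callback: zero any part of a request that the server
 * did not return (a short read at EOF), mark the ranges that were
 * read up to date, and release each request.
 */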
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	int error;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct folio *folio = nfs_page_to_folio(req);
		unsigned long start = req->wb_pgbase;
		unsigned long end = req->wb_pgbase + req->wb_bytes;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			/* note: regions of the page not covered by a
			 * request are zeroed in readpage_async_filler */
			if (bytes > hdr->good_bytes) {
				/* nothing in this request was good, so zero
				 * the full extent of the request */
				folio_zero_segment(folio, start, end);

			} else if (hdr->good_bytes - bytes < req->wb_bytes) {
				/* part of this request has good bytes, but
				 * not all. zero the bad bytes */
				start += hdr->good_bytes - bytes;
				WARN_ON(start < req->wb_pgbase);
				folio_zero_segment(folio, start, end);
			}
		}
		error = 0;
		bytes += req->wb_bytes;
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
			if (bytes <= hdr->good_bytes)
				nfs_page_group_set_uptodate(req);
			else {
				error = hdr->error;
				xchg(&nfs_req_openctx(req)->error, error);
			}
		} else
			nfs_page_group_set_uptodate(req);
		nfs_list_remove_request(req);
		nfs_readpage_release(req, error);
	}
out:
	hdr->release(hdr);
}

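/* Set up the READ RPC message and emit the initiation tracepoint. */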
static void nfs_initiate_read(struct nfs_pgio_header *hdr,
			      struct rpc_message *msg,
			      const struct nfs_rpc_ops *rpc_ops,
			      struct rpc_task_setup *task_setup_data, int how)
{
	rpc_ops->read_setup(hdr, msg);
	trace_nfs_initiate_read(hdr);
}

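/* Fail and release every request on @head with @error. */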
static void
nfs_async_read_error(struct list_head *head, int error)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_readpage_release(req, error);
	}
}

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
	.error_cleanup = nfs_async_read_error,
	.completion = nfs_read_completion,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task,
			     struct nfs_pgio_header *hdr,
			     struct inode *inode)
{
	int status = NFS_PROTO(inode)->read_done(task, hdr);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);
	trace_nfs_readpage_done(task, hdr);

	if (task->tk_status == -ESTALE) {
		nfs_set_inode_stale(inode);
		nfs_mark_for_revalidate(inode);
	}
	return 0;
}

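/*
 * Handle a short read: fail the request if the server made no
 * progress, redirect non-RPC layout drivers back through the MDS,
 * or restart the RPC for the remaining byte range.
 */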
static void nfs_readpage_retry(struct rpc_task *task,
			       struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res  *resp = &hdr->res;

	/* This is a short read! */
	nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
	trace_nfs_readpage_short(task, hdr);

	/* Has the server at least made some progress? */
	if (resp->count == 0) {
		nfs_set_pgio_error(hdr, -EIO, argp->offset);
		return;
	}

	/* For non rpc-based layout drivers, retry-through-MDS */
	if (!task->tk_ops) {
		hdr->pnfs_error = -EAGAIN;
		return;
	}

	/* Yes, so retry the read at the end of the hdr */
	hdr->mds_offset += resp->count;
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	resp->count = 0;
	resp->eof = 0;
	rpc_restart_call_prepare(task);
}

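/*
 * Trim good_bytes when the server reported EOF within the request;
 * otherwise treat a short read as retryable.
 */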
static void nfs_readpage_result(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	if (hdr->res.eof) {
		loff_t pos = hdr->args.offset + hdr->res.count;
		unsigned int new = pos - hdr->io_start;

		if (hdr->good_bytes > new) {
			hdr->good_bytes = new;
			set_bit(NFS_IOHDR_EOF, &hdr->flags);
			clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
		}
	} else if (hdr->res.count < hdr->args.count)
		nfs_readpage_retry(task, hdr);
}

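/*
 * Queue an async read for @folio: try fscache first (unless the
 * inode is sync), otherwise create an nfs_page covering the valid
 * bytes, rounded up to the server rsize, and add it to the pageio
 * descriptor. The tail beyond the valid length is zeroed here.
 */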
static int readpage_async_filler(struct nfs_readdesc *desc, struct folio *folio)
{
	struct inode *inode = folio_file_mapping(folio)->host;
	struct nfs_server *server = NFS_SERVER(inode);
	size_t fsize = folio_size(folio);
	unsigned int rsize = server->rsize;
	struct nfs_page *new;
	unsigned int len, aligned_len;
	int error;

	len = nfs_folio_length(folio);
	if (len == 0)
		return nfs_return_empty_folio(folio);

	aligned_len = min_t(unsigned int, ALIGN(len, rsize), fsize);

	if (!IS_SYNC(inode)) {
		error = nfs_fscache_read_page(inode, &folio->page);
		if (error == 0)
			goto out_unlock;
	}

	new = nfs_page_create_from_folio(desc->ctx, folio, 0, aligned_len);
	if (IS_ERR(new))
		goto out_error;

	if (len < fsize)
		folio_zero_segment(folio, len, fsize);
	if (!nfs_pageio_add_request(&desc->pgio, new)) {
		nfs_list_remove_request(new);
		error = desc->pgio.pg_error;
		nfs_readpage_release(new, error);
		goto out;
	}
	return 0;
out_error:
	error = PTR_ERR(new);
out_unlock:
	folio_unlock(folio);
out:
	return error;
}

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_read_folio(struct file *file, struct folio *folio)
{
	struct nfs_readdesc desc;
	struct inode *inode = file_inode(file);
	int ret;

	trace_nfs_aop_readpage(inode, folio);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	task_io_account_read(folio_size(folio));

	/*
	 * Try to flush any pending writes to the file..
	 *
	 * NOTE! Because we own the folio lock, there cannot
	 * be any new pending writes generated at this point
	 * for this folio (other folios can be written to).
	 */
	ret = nfs_wb_folio(inode, folio);
	if (ret)
		goto out_unlock;
	if (folio_test_uptodate(folio))
		goto out_unlock;

	ret = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	desc.ctx = get_nfs_open_context(nfs_file_open_context(file));

	xchg(&desc.ctx->error, 0);
	nfs_pageio_init_read(&desc.pgio, inode, false,
			     &nfs_async_read_completion_ops);

	ret = readpage_async_filler(&desc, folio);
	if (ret)
		goto out;

	nfs_pageio_complete_read(&desc.pgio);
	ret = desc.pgio.pg_error < 0 ? desc.pgio.pg_error : 0;
	if (!ret) {
		ret = folio_wait_locked_killable(folio);
		if (!folio_test_uptodate(folio) && !ret)
			ret = xchg(&desc.ctx->error, 0);
	}
out:
	put_nfs_open_context(desc.ctx);
	trace_nfs_aop_readpage_done(inode, folio, ret);
	return ret;
out_unlock:
	folio_unlock(folio);
	trace_nfs_aop_readpage_done(inode, folio, ret);
	return ret;
}

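/*
 * Readahead entry point: queue an async read for each folio in the
 * readahead window, then submit the whole batch. Falls back to any
 * open read context for the inode when no file is supplied.
 */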
void nfs_readahead(struct readahead_control *ractl)
{
	unsigned int nr_pages = readahead_count(ractl);
	struct file *file = ractl->file;
	struct nfs_readdesc desc;
	struct inode *inode = ractl->mapping->host;
	struct folio *folio;
	int ret;

	trace_nfs_aop_readahead(inode, readahead_pos(ractl), nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);
	task_io_account_read(readahead_length(ractl));

	ret = -ESTALE;
	if (NFS_STALE(inode))
		goto out;

	if (file == NULL) {
		ret = -EBADF;
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			goto out;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(file));

	nfs_pageio_init_read(&desc.pgio, inode, false,
			     &nfs_async_read_completion_ops);

	while ((folio = readahead_folio(ractl)) != NULL) {
		ret = readpage_async_filler(&desc, folio);
		if (ret)
			break;
	}

	nfs_pageio_complete_read(&desc.pgio);

	put_nfs_open_context(desc.ctx);
out:
	trace_nfs_aop_readahead_done(inode, nr_pages, ret);
}

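/* Create the slab cache for read headers at module init. */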
int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	return 0;
}

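/* Destroy the read header cache at module exit. */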
void nfs_destroy_readpagecache(void)
{
	kmem_cache_destroy(nfs_rdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_read_ops = {
	.rw_alloc_header	= nfs_readhdr_alloc,
	.rw_free_header		= nfs_readhdr_free,
	.rw_done		= nfs_readpage_done,
	.rw_result		= nfs_readpage_result,
	.rw_initiate		= nfs_initiate_read,
};