// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"
#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;

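/*
 * Allocate a zeroed read header from the nfs_rdata_cachep slab and
 * mark it for reading. Returns NULL if the allocation fails.
 */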
static struct nfs_pgio_header *nfs_readhdr_alloc(void)
{
	struct nfs_pgio_header *p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);

	if (p)
		p->rw_mode = FMODE_READ;
	return p;
}

static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
	kmem_cache_free(nfs_rdata_cachep, rhdr);
}

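/*
 * Zero-fill a page that has nothing to read from the server, mark it
 * up to date and unlock it.
 */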
static int nfs_return_empty_page(struct page *page)
{
	zero_user(page, 0, PAGE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

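/*
 * Initialise a page I/O descriptor for reads, using the pNFS layout
 * driver's read ops when one is attached (unless the caller forces
 * I/O through the MDS), and the server's rsize as the I/O size.
 */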
void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
			      struct inode *inode, bool force_mds,
			      const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
			server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);

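/*
 * Flush out any read requests still queued on the descriptor and
 * update the inode's read I/O statistics with the bytes and pages
 * that were submitted.
 */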
static void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *pgm;
	unsigned long npages;

	nfs_pageio_complete(pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	pgm = &pgio->pg_mirrors[0];
	NFS_I(pgio->pg_inode)->read_io += pgm->pg_bytes_written;
	npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nfs_add_stats(pgio->pg_inode, NFSIOS_READPAGES, npages);
}

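/*
 * Switch a page I/O descriptor back to plain RPC-based reads through
 * the MDS, resetting the mirror's block size to the server's rsize.
 */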
void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	/* read path should never have more than one mirror */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

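/*
 * Complete a read request: mark the page in error on fatal server
 * errors, and once the whole page group is done, push an up-to-date
 * page into fscache (or evict a failed one from the page cache),
 * unlock the page, and release the request.
 */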
static void nfs_readpage_release(struct nfs_page *req, int error)
{
	struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
	struct page *page = req->wb_page;

	dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(inode), req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT)
		SetPageError(page);
	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
		struct address_space *mapping = page_file_mapping(page);

		if (PageUptodate(page))
			nfs_readpage_to_fscache(inode, page);
		else if (!PageError(page) && !PagePrivate(page))
			generic_error_remove_page(mapping, page);
		unlock_page(page);
	}
	nfs_release_request(req);
}

struct nfs_readdesc {
	struct nfs_pageio_descriptor pgio;
	struct nfs_open_context *ctx;
};

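/*
 * Mark the page up to date once every request in its page group has
 * been flagged PG_UPTODATE.
 */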
static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
		SetPageUptodate(req->wb_page);
}

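/*
 * Completion callback for a read header: walk its requests, zero the
 * parts of each page that lie beyond the good bytes when the server
 * hit EOF, mark fully-read ranges up to date, record any error in the
 * open context, and release every request.
 */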
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	int error;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;
		unsigned long start = req->wb_pgbase;
		unsigned long end = req->wb_pgbase + req->wb_bytes;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			/* note: regions of the page not covered by a
			 * request are zeroed in readpage_async_filler */
			if (bytes > hdr->good_bytes) {
				/* nothing in this request was good, so zero
				 * the full extent of the request */
				zero_user_segment(page, start, end);

			} else if (hdr->good_bytes - bytes < req->wb_bytes) {
				/* part of this request has good bytes, but
				 * not all. zero the bad bytes */
				start += hdr->good_bytes - bytes;
				WARN_ON(start < req->wb_pgbase);
				zero_user_segment(page, start, end);
			}
		}
		error = 0;
		bytes += req->wb_bytes;
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
			if (bytes <= hdr->good_bytes)
				nfs_page_group_set_uptodate(req);
			else {
				error = hdr->error;
				xchg(&nfs_req_openctx(req)->error, error);
			}
		} else
			nfs_page_group_set_uptodate(req);
		nfs_list_remove_request(req);
		nfs_readpage_release(req, error);
	}
out:
	hdr->release(hdr);
}

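/*
 * Set up the RPC message for a READ call, adding the swap I/O flags
 * when the file is being used as a swapfile.
 */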
static void nfs_initiate_read(struct nfs_pgio_header *hdr,
			      struct rpc_message *msg,
			      const struct nfs_rpc_ops *rpc_ops,
			      struct rpc_task_setup *task_setup_data, int how)
{
	struct inode *inode = hdr->inode;
	int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;

	task_setup_data->flags |= swap_flags;
	rpc_ops->read_setup(hdr, msg);
	trace_nfs_initiate_read(hdr);
}

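/*
 * Error cleanup: fail and release every request still queued on the
 * list with the given error.
 */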
static void
nfs_async_read_error(struct list_head *head, int error)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_readpage_release(req, error);
	}
}

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
	.error_cleanup = nfs_async_read_error,
	.completion = nfs_read_completion,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task,
			     struct nfs_pgio_header *hdr,
			     struct inode *inode)
{
	int status = NFS_PROTO(inode)->read_done(task, hdr);

	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);
	trace_nfs_readpage_done(task, hdr);

	if (task->tk_status == -ESTALE) {
		nfs_set_inode_stale(inode);
		nfs_mark_for_revalidate(inode);
	}
	return 0;
}

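/*
 * Handle a short read: if the server made no progress at all, flag the
 * header with -EIO; for non RPC-based layout drivers, ask for a retry
 * through the MDS; otherwise advance the arguments past the bytes
 * already received and restart the RPC call.
 */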
static void nfs_readpage_retry(struct rpc_task *task,
			       struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res  *resp = &hdr->res;

	/* This is a short read! */
	nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
	trace_nfs_readpage_short(task, hdr);

	/* Has the server at least made some progress? */
	if (resp->count == 0) {
		nfs_set_pgio_error(hdr, -EIO, argp->offset);
		return;
	}

	/* For non rpc-based layout drivers, retry-through-MDS */
	if (!task->tk_ops) {
		hdr->pnfs_error = -EAGAIN;
		return;
	}

	/* Yes, so retry the read at the end of the hdr */
	hdr->mds_offset += resp->count;
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	resp->count = 0;
	resp->eof = 0;
	rpc_restart_call_prepare(task);
}

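/*
 * Per-RPC result handling: on EOF, trim the number of good bytes to
 * what the server actually returned and flag the header; on a short
 * read without EOF, retry the remainder.
 */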
static void nfs_readpage_result(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	if (hdr->res.eof) {
		loff_t pos = hdr->args.offset + hdr->res.count;
		unsigned int new = pos - hdr->io_start;

		if (hdr->good_bytes > new) {
			hdr->good_bytes = new;
			set_bit(NFS_IOHDR_EOF, &hdr->flags);
			clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
		}
	} else if (hdr->res.count < hdr->args.count)
		nfs_readpage_retry(task, hdr);
}

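/*
 * Build and queue a read request for a single page. Pages with
 * nothing to read are zero-filled, fscache is tried first for
 * non-synchronous inodes, and the tail of a partially covered page is
 * zeroed before the request is added to the page I/O descriptor.
 */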
static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = data;
	struct inode *inode = page_file_mapping(page)->host;
	unsigned int rsize = NFS_SERVER(inode)->rsize;
	struct nfs_page *new;
	unsigned int len, aligned_len;
	int error;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);

	aligned_len = min_t(unsigned int, ALIGN(len, rsize), PAGE_SIZE);

	if (!IS_SYNC(page->mapping->host)) {
		error = nfs_readpage_from_fscache(page->mapping->host, page);
		if (error == 0)
			goto out_unlock;
	}

	new = nfs_create_request(desc->ctx, page, 0, aligned_len);
	if (IS_ERR(new))
		goto out_error;

	if (len < PAGE_SIZE)
		zero_user_segment(page, len, PAGE_SIZE);
	if (!nfs_pageio_add_request(&desc->pgio, new)) {
		nfs_list_remove_request(new);
		error = desc->pgio.pg_error;
		nfs_readpage_release(new, error);
		goto out;
	}
	return 0;
out_error:
	error = PTR_ERR(new);
out_unlock:
	unlock_page(page);
out:
	return error;
}

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_readdesc desc;
	struct inode *inode = page_file_mapping(page)->host;
	int ret;

	trace_nfs_aop_readpage(inode, page);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);

	/*
	 * Try to flush any pending writes to the file.
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	ret = nfs_wb_page(inode, page);
	if (ret)
		goto out_unlock;
	if (PageUptodate(page))
		goto out_unlock;

	ret = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	if (file == NULL) {
		ret = -EBADF;
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			goto out_unlock;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(file));

	xchg(&desc.ctx->error, 0);
	nfs_pageio_init_read(&desc.pgio, inode, false,
			     &nfs_async_read_completion_ops);

	ret = readpage_async_filler(&desc, page);
	if (ret)
		goto out;

	nfs_pageio_complete_read(&desc.pgio);
	ret = desc.pgio.pg_error < 0 ? desc.pgio.pg_error : 0;
	if (!ret) {
		ret = wait_on_page_locked_killable(page);
		if (!PageUptodate(page) && !ret)
			ret = xchg(&desc.ctx->error, 0);
	}
out:
	put_nfs_open_context(desc.ctx);
	trace_nfs_aop_readpage_done(inode, page, ret);
	return ret;
out_unlock:
	unlock_page(page);
	trace_nfs_aop_readpage_done(inode, page, ret);
	return ret;
}

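/*
 * Readahead entry point: queue an asynchronous read for each page in
 * the list, using the file's open context (or any matching read
 * context if no file is supplied).
 */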
int nfs_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	struct nfs_readdesc desc;
	struct inode *inode = mapping->host;
	int ret;

	trace_nfs_aop_readahead(inode, lru_to_page(pages), nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	ret = -ESTALE;
	if (NFS_STALE(inode))
		goto out;

	if (file == NULL) {
		ret = -EBADF;
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			goto out;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(file));

	nfs_pageio_init_read(&desc.pgio, inode, false,
			     &nfs_async_read_completion_ops);

	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);

	nfs_pageio_complete_read(&desc.pgio);

	put_nfs_open_context(desc.ctx);
out:
	trace_nfs_aop_readahead_done(inode, nr_pages, ret);
	return ret;
}

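/*
 * Create the slab cache used to allocate read headers.
 */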
int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	kmem_cache_destroy(nfs_rdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_read_ops = {
	.rw_alloc_header	= nfs_readhdr_alloc,
	.rw_free_header		= nfs_readhdr_free,
	.rw_done		= nfs_readpage_done,
	.rw_result		= nfs_readpage_result,
	.rw_initiate		= nfs_initiate_read,
};