// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001  Initial implementation for 2.4  --cel
 * 08 Jul 2002  Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003  Port to 2.5 APIs  --cel
 * 31 Mar 2004  Handle direct I/O without VFS support  --cel
 * 15 Sep 2004  Parallel async reads  --cel
 * 04 May 2005  support O_DIRECT with aio  --cel
 *
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/module.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>

#include "internal.h"
#include "iostat.h"
#include "pnfs.h"
#include "fscache.h"

#define NFSDBG_FACILITY         NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

struct nfs_direct_req {
        struct kref             kref;           /* release manager */

        /* I/O parameters */
        struct nfs_open_context *ctx;           /* file open context info */
        struct nfs_lock_context *l_ctx;         /* Lock context info */
        struct kiocb *          iocb;           /* controlling i/o request */
        struct inode *          inode;          /* target file of i/o */

        /* completion state */
        atomic_t                io_count;       /* i/os we're waiting for */
        spinlock_t              lock;           /* protect completion state */

        loff_t                  io_start;       /* Start offset for I/O */
        ssize_t                 count,          /* bytes actually processed */
                                max_count,      /* max expected count */
                                bytes_left,     /* bytes left to be sent */
                                error;          /* any reported error */
        struct completion       completion;     /* wait for i/o completion */

        /* commit state */
        struct nfs_mds_commit_info mds_cinfo;   /* Storage for cinfo */
        struct pnfs_ds_commit_info ds_cinfo;    /* Storage for cinfo */
        struct work_struct      work;
        int                     flags;
        /* for write */
#define NFS_ODIRECT_DO_COMMIT           (1)     /* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES      (2)     /* write verification failed */
        /* for read */
#define NFS_ODIRECT_SHOULD_DIRTY        (3)     /* dirty user-space page after read */
#define NFS_ODIRECT_DONE                INT_MAX /* write I/O is complete; no further commit or resend */
};

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
static void nfs_direct_write_complete(struct nfs_direct_req *dreq);
static void nfs_direct_write_schedule_work(struct work_struct *work);

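/*
 * Each in-flight pgio header holds a reference on the dreq via
 * io_count (taken in nfs_direct_pgio_init()); the completion that
 * drops io_count to zero finishes off the whole request.
 */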
static inline void get_dreq(struct nfs_direct_req *dreq)
{
        atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
        return atomic_dec_and_test(&dreq->io_count);
}

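/*
 * On a short read/write or EOF, clamp the request's byte counts to
 * what the server actually processed, latching the first error (or
 * clearing it again when the "error" was just EOF).
 */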
static void
nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
                            const struct nfs_pgio_header *hdr,
                            ssize_t dreq_len)
{
        if (!(test_bit(NFS_IOHDR_ERROR, &hdr->flags) ||
              test_bit(NFS_IOHDR_EOF, &hdr->flags)))
                return;
        if (dreq->max_count >= dreq_len) {
                dreq->max_count = dreq_len;
                if (dreq->count > dreq_len)
                        dreq->count = dreq_len;

                if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
                        dreq->error = hdr->error;
                else /* Clear outstanding error if this is EOF */
                        dreq->error = 0;
        }
}

static void
nfs_direct_count_bytes(struct nfs_direct_req *dreq,
                       const struct nfs_pgio_header *hdr)
{
        loff_t hdr_end = hdr->io_start + hdr->good_bytes;
        ssize_t dreq_len = 0;

        if (hdr_end > dreq->io_start)
                dreq_len = hdr_end - dreq->io_start;

        nfs_direct_handle_truncated(dreq, hdr, dreq_len);

        if (dreq_len > dreq->max_count)
                dreq_len = dreq->max_count;

        if (dreq->count < dreq_len)
                dreq->count = dreq_len;
}

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @iocb: target I/O control block
 * @iter: I/O buffer
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O. However, for most direct IO, we
 * shunt off direct read and write requests before the VFS gets them,
 * so this method is only ever called for swap.
 */
ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
        struct inode *inode = iocb->ki_filp->f_mapping->host;

        /* we only support swap file calling nfs_direct_IO */
        if (!IS_SWAPFILE(inode))
                return 0;

        VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);

        if (iov_iter_rw(iter) == READ)
                return nfs_file_direct_read(iocb, iter, true);
        return nfs_file_direct_write(iocb, iter, true);
}

static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
        unsigned int i;
        for (i = 0; i < npages; i++)
                put_page(pages[i]);
}

void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
                              struct nfs_direct_req *dreq)
{
        cinfo->inode = dreq->inode;
        cinfo->mds = &dreq->mds_cinfo;
        cinfo->ds = &dreq->ds_cinfo;
        cinfo->dreq = dreq;
        cinfo->completion_ops = &nfs_direct_commit_completion_ops;
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
        struct nfs_direct_req *dreq;

        dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
        if (!dreq)
                return NULL;

        kref_init(&dreq->kref);
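        /*
         * Two references: kref_init() accounts for the issuing caller
         * (dropped in nfs_file_direct_read/write), and the kref_get()
         * below accounts for the I/O completion path (dropped in
         * nfs_direct_complete()).
         */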
        kref_get(&dreq->kref);
        init_completion(&dreq->completion);
        INIT_LIST_HEAD(&dreq->mds_cinfo.list);
        pnfs_init_ds_commit_info(&dreq->ds_cinfo);
        INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
        spin_lock_init(&dreq->lock);

        return dreq;
}

static void nfs_direct_req_free(struct kref *kref)
{
        struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

        pnfs_release_ds_info(&dreq->ds_cinfo, dreq->inode);
        if (dreq->l_ctx != NULL)
                nfs_put_lock_context(dreq->l_ctx);
        if (dreq->ctx != NULL)
                put_nfs_open_context(dreq->ctx);
        kmem_cache_free(nfs_direct_cachep, dreq);
}

static void nfs_direct_req_release(struct nfs_direct_req *dreq)
{
        kref_put(&dreq->kref, nfs_direct_req_free);
}

ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq)
{
        return dreq->bytes_left;
}
EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
        ssize_t result = -EIOCBQUEUED;

        /* Async requests don't wait here */
        if (dreq->iocb)
                goto out;

        result = wait_for_completion_killable(&dreq->completion);

        if (!result) {
                result = dreq->count;
                WARN_ON_ONCE(dreq->count < 0);
        }
        if (!result)
                result = dreq->error;

out:
        return (ssize_t) result;
}

/*
 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
        struct inode *inode = dreq->inode;

        inode_dio_end(inode);

        if (dreq->iocb) {
                long res = (long) dreq->error;
                if (dreq->count != 0) {
                        res = (long) dreq->count;
                        WARN_ON_ONCE(dreq->count < 0);
                }
                dreq->iocb->ki_complete(dreq->iocb, res);
        }

        complete(&dreq->completion);

        nfs_direct_req_release(dreq);
}

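/*
 * Per-RPC read completion: dirty the user pages that now hold valid
 * data (only when the iterator points at user memory), release the
 * page requests, and finish off the dreq once the last RPC is in.
 */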
static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
{
        unsigned long bytes = 0;
        struct nfs_direct_req *dreq = hdr->dreq;

        spin_lock(&dreq->lock);
        if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
                spin_unlock(&dreq->lock);
                goto out_put;
        }

        nfs_direct_count_bytes(dreq, hdr);
        spin_unlock(&dreq->lock);

        while (!list_empty(&hdr->pages)) {
                struct nfs_page *req = nfs_list_entry(hdr->pages.next);
                struct page *page = req->wb_page;

                if (!PageCompound(page) && bytes < hdr->good_bytes &&
                    (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY))
                        set_page_dirty(page);
                bytes += req->wb_bytes;
                nfs_list_remove_request(req);
                nfs_release_request(req);
        }
out_put:
        if (put_dreq(dreq))
                nfs_direct_complete(dreq);
        hdr->release(hdr);
}

static void nfs_read_sync_pgio_error(struct list_head *head, int error)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_release_request(req);
        }
}

static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
{
        get_dreq(hdr->dreq);
}

static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
        .error_cleanup = nfs_read_sync_pgio_error,
        .init_hdr = nfs_direct_pgio_init,
        .completion = nfs_direct_read_completion,
};

/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation.  If iov_iter_get_pages_alloc() or nfs_create_request()
 * fails, bail and stop sending more reads.  Read length accounting is
 * handled automatically by nfs_direct_count_bytes().  Otherwise, if
 * no requests have been sent, just return an error.
 */

static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
                                              struct iov_iter *iter,
                                              loff_t pos)
{
        struct nfs_pageio_descriptor desc;
        struct inode *inode = dreq->inode;
        ssize_t result = -EINVAL;
        size_t requested_bytes = 0;
        size_t rsize = max_t(size_t, NFS_SERVER(inode)->rsize, PAGE_SIZE);

        nfs_pageio_init_read(&desc, dreq->inode, false,
                             &nfs_direct_read_completion_ops);
        get_dreq(dreq);
        desc.pg_dreq = dreq;
        inode_dio_begin(inode);

        while (iov_iter_count(iter)) {
                struct page **pagevec;
                size_t bytes;
                size_t pgbase;
                unsigned npages, i;

                result = iov_iter_get_pages_alloc(iter, &pagevec,
                                                  rsize, &pgbase);
                if (result < 0)
                        break;

                bytes = result;
                iov_iter_advance(iter, bytes);
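                /* pages spanned by this chunk, counting the partial first page */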
                npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
                for (i = 0; i < npages; i++) {
                        struct nfs_page *req;
                        unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
                        /* XXX do we need to do the eof zeroing found in async_filler? */
                        req = nfs_create_request(dreq->ctx, pagevec[i],
                                                 pgbase, req_len);
                        if (IS_ERR(req)) {
                                result = PTR_ERR(req);
                                break;
                        }
                        req->wb_index = pos >> PAGE_SHIFT;
                        req->wb_offset = pos & ~PAGE_MASK;
                        if (!nfs_pageio_add_request(&desc, req)) {
                                result = desc.pg_error;
                                nfs_release_request(req);
                                break;
                        }
                        pgbase = 0;
                        bytes -= req_len;
                        requested_bytes += req_len;
                        pos += req_len;
                        dreq->bytes_left -= req_len;
                }
                nfs_direct_release_pages(pagevec, npages);
                kvfree(pagevec);
                if (result < 0)
                        break;
        }

        nfs_pageio_complete(&desc);

        /*
         * If no bytes were started, return the error, and let the
         * generic layer handle the completion.
         */
        if (requested_bytes == 0) {
                inode_dio_end(inode);
                nfs_direct_req_release(dreq);
                return result < 0 ? result : -EIO;
        }

        if (put_dreq(dreq))
                nfs_direct_complete(dreq);
        return requested_bytes;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers into which to read data
 * @swap: flag indicating this is swap IO, not O_DIRECT IO
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
                             bool swap)
{
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        struct nfs_direct_req *dreq;
        struct nfs_lock_context *l_ctx;
        ssize_t result, requested;
        size_t count = iov_iter_count(iter);
        nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

        dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
                file, count, (long long) iocb->ki_pos);

        result = 0;
        if (!count)
                goto out;

        task_io_account_read(count);

        result = -ENOMEM;
        dreq = nfs_direct_req_alloc();
        if (dreq == NULL)
                goto out;

        dreq->inode = inode;
        dreq->bytes_left = dreq->max_count = count;
        dreq->io_start = iocb->ki_pos;
        dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
        l_ctx = nfs_get_lock_context(dreq->ctx);
        if (IS_ERR(l_ctx)) {
                result = PTR_ERR(l_ctx);
                nfs_direct_req_release(dreq);
                goto out_release;
        }
        dreq->l_ctx = l_ctx;
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;

        if (iter_is_iovec(iter))
                dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;

        if (!swap)
                nfs_start_io_direct(inode);

        NFS_I(inode)->read_io += count;
        requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);

        if (!swap)
                nfs_end_io_direct(inode);

        if (requested > 0) {
                result = nfs_direct_wait(dreq);
                if (result > 0) {
                        requested -= result;
                        iocb->ki_pos += result;
                }
                iov_iter_revert(iter, requested);
        } else {
                result = requested;
        }

out_release:
        nfs_direct_req_release(dreq);
out:
        return result;
}

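/*
 * Undo any page-group splitting before the requests are resent:
 * drop the subrequests and fold each group back into its head
 * request.
 */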
static void
nfs_direct_join_group(struct list_head *list, struct inode *inode)
{
        struct nfs_page *req, *next;

        list_for_each_entry(req, list, wb_list) {
                if (req->wb_head != req || req->wb_this_page == req)
                        continue;
                for (next = req->wb_this_page;
                                next != req->wb_head;
                                next = next->wb_this_page) {
                        nfs_list_remove_request(next);
                        nfs_release_request(next);
                }
                nfs_join_page_group(req, inode);
        }
}

static void
nfs_direct_write_scan_commit_list(struct inode *inode,
                                  struct list_head *list,
                                  struct nfs_commit_info *cinfo)
{
        mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
        pnfs_recover_commit_reqs(list, cinfo);
        nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
        mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
}

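/*
 * Pull every request off the commit lists and resend it as a stable
 * (FLUSH_STABLE) write, so that no further COMMIT is needed for the
 * data that gets through this time.
 */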
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
        struct nfs_pageio_descriptor desc;
        struct nfs_page *req, *tmp;
        LIST_HEAD(reqs);
        struct nfs_commit_info cinfo;
        LIST_HEAD(failed);

        nfs_init_cinfo_from_dreq(&cinfo, dreq);
        nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);

        nfs_direct_join_group(&reqs, dreq->inode);

        dreq->count = 0;
        dreq->max_count = 0;
        list_for_each_entry(req, &reqs, wb_list)
                dreq->max_count += req->wb_bytes;
        nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
        get_dreq(dreq);

        nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
                              &nfs_direct_write_completion_ops);
        desc.pg_dreq = dreq;

        list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
                /* Bump the transmission count */
                req->wb_nio++;
                if (!nfs_pageio_add_request(&desc, req)) {
                        nfs_list_move_request(req, &failed);
                        spin_lock(&cinfo.inode->i_lock);
                        dreq->flags = 0;
                        if (desc.pg_error < 0)
                                dreq->error = desc.pg_error;
                        else
                                dreq->error = -EIO;
                        spin_unlock(&cinfo.inode->i_lock);
                }
                nfs_release_request(req);
        }
        nfs_pageio_complete(&desc);

        while (!list_empty(&failed)) {
                req = nfs_list_entry(failed.next);
                nfs_list_remove_request(req);
                nfs_unlock_and_release_request(req);
        }

        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq);
}

static void nfs_direct_commit_complete(struct nfs_commit_data *data)
{
        const struct nfs_writeverf *verf = data->res.verf;
        struct nfs_direct_req *dreq = data->dreq;
        struct nfs_commit_info cinfo;
        struct nfs_page *req;
        int status = data->task.tk_status;

        if (status < 0) {
                /* Errors in commit are fatal */
                dreq->error = status;
                dreq->max_count = 0;
                dreq->count = 0;
                dreq->flags = NFS_ODIRECT_DONE;
        } else if (dreq->flags == NFS_ODIRECT_DONE)
                status = dreq->error;

        nfs_init_cinfo_from_dreq(&cinfo, dreq);

        while (!list_empty(&data->pages)) {
                req = nfs_list_entry(data->pages.next);
                nfs_list_remove_request(req);
                if (status >= 0 && !nfs_write_match_verf(verf, req)) {
                        dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
                        /*
                         * Despite the reboot, the write was successful,
                         * so reset wb_nio.
                         */
                        req->wb_nio = 0;
                        nfs_mark_request_commit(req, NULL, &cinfo, 0);
                } else /* Error or match */
                        nfs_release_request(req);
                nfs_unlock_and_release_request(req);
        }

        if (nfs_commit_end(cinfo.mds))
                nfs_direct_write_complete(dreq);
}

static void nfs_direct_resched_write(struct nfs_commit_info *cinfo,
                struct nfs_page *req)
{
        struct nfs_direct_req *dreq = cinfo->dreq;

        spin_lock(&dreq->lock);
        if (dreq->flags != NFS_ODIRECT_DONE)
                dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
        spin_unlock(&dreq->lock);
        nfs_mark_request_commit(req, NULL, cinfo, 0);
}

static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
        .completion = nfs_direct_commit_complete,
        .resched_write = nfs_direct_resched_write,
};

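/*
 * Send COMMITs for the unstable writes; nfs_generic_commit_list()
 * only fails with -ENOMEM, in which case fall back to resending the
 * pages as stable writes.
 */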
static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
        int res;
        struct nfs_commit_info cinfo;
        LIST_HEAD(mds_list);

        nfs_init_cinfo_from_dreq(&cinfo, dreq);
        nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
        res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
        if (res < 0) /* res == -ENOMEM */
                nfs_direct_write_reschedule(dreq);
}

static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq)
{
        struct nfs_commit_info cinfo;
        struct nfs_page *req;
        LIST_HEAD(reqs);

        nfs_init_cinfo_from_dreq(&cinfo, dreq);
        nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);

        while (!list_empty(&reqs)) {
                req = nfs_list_entry(reqs.next);
                nfs_list_remove_request(req);
                nfs_release_request(req);
                nfs_unlock_and_release_request(req);
        }
}

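/*
 * Deferred completion work: act on the state accumulated by the
 * per-RPC completions - COMMIT unstable data, resend failed writes,
 * or finish up and zap the page cache over the written range.
 */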
static void nfs_direct_write_schedule_work(struct work_struct *work)
{
        struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
        int flags = dreq->flags;

        dreq->flags = 0;
        switch (flags) {
        case NFS_ODIRECT_DO_COMMIT:
                nfs_direct_commit_schedule(dreq);
                break;
        case NFS_ODIRECT_RESCHED_WRITES:
                nfs_direct_write_reschedule(dreq);
                break;
        default:
                nfs_direct_write_clear_reqs(dreq);
                nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
                nfs_direct_complete(dreq);
        }
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq)
{
        queue_work(nfsiod_workqueue, &dreq->work); /* Calls nfs_direct_write_schedule_work */
}

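/*
 * Per-RPC write completion: on an UNSTABLE reply, queue the pages for
 * a later COMMIT; on a reschedule, put them back on the commit list
 * so the resend picks them up.
 */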
static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
{
        struct nfs_direct_req *dreq = hdr->dreq;
        struct nfs_commit_info cinfo;
        struct nfs_page *req = nfs_list_entry(hdr->pages.next);
        int flags = NFS_ODIRECT_DONE;

        nfs_init_cinfo_from_dreq(&cinfo, dreq);

        spin_lock(&dreq->lock);
        if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
                spin_unlock(&dreq->lock);
                goto out_put;
        }

        nfs_direct_count_bytes(dreq, hdr);
        if (hdr->good_bytes != 0 && nfs_write_need_commit(hdr)) {
                if (!dreq->flags)
                        dreq->flags = NFS_ODIRECT_DO_COMMIT;
                flags = dreq->flags;
        }
        spin_unlock(&dreq->lock);

        while (!list_empty(&hdr->pages)) {
                req = nfs_list_entry(hdr->pages.next);
                nfs_list_remove_request(req);
                if (flags == NFS_ODIRECT_DO_COMMIT) {
                        kref_get(&req->wb_kref);
                        memcpy(&req->wb_verf, &hdr->verf.verifier,
                               sizeof(req->wb_verf));
                        nfs_mark_request_commit(req, hdr->lseg, &cinfo,
                                hdr->ds_commit_idx);
                } else if (flags == NFS_ODIRECT_RESCHED_WRITES) {
                        kref_get(&req->wb_kref);
                        nfs_mark_request_commit(req, NULL, &cinfo, 0);
                }
                nfs_unlock_and_release_request(req);
        }

out_put:
        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq);
        hdr->release(hdr);
}

static void nfs_write_sync_pgio_error(struct list_head *head, int error)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_unlock_and_release_request(req);
        }
}

static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
{
        struct nfs_direct_req *dreq = hdr->dreq;

        spin_lock(&dreq->lock);
        if (dreq->error == 0) {
                dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
                /* fake unstable write to let common nfs resend pages */
                hdr->verf.committed = NFS_UNSTABLE;
                hdr->good_bytes = hdr->args.offset + hdr->args.count -
                        hdr->io_start;
        }
        spin_unlock(&dreq->lock);
}

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
        .error_cleanup = nfs_write_sync_pgio_error,
        .init_hdr = nfs_direct_pgio_init,
        .completion = nfs_direct_write_completion,
        .reschedule_io = nfs_direct_write_reschedule_io,
};


/*
 * NB: Return the value of the first error return code.  Subsequent
 * errors after the first one are ignored.
 *
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation.  If iov_iter_get_pages_alloc() or nfs_create_request()
 * fails, bail and stop sending more writes.  Write length accounting
 * is handled automatically by nfs_direct_count_bytes().  Otherwise,
 * if no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
                                               struct iov_iter *iter,
                                               loff_t pos, int ioflags)
{
        struct nfs_pageio_descriptor desc;
        struct inode *inode = dreq->inode;
        ssize_t result = 0;
        size_t requested_bytes = 0;
        size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);

        nfs_pageio_init_write(&desc, inode, ioflags, false,
                              &nfs_direct_write_completion_ops);
        desc.pg_dreq = dreq;
        get_dreq(dreq);
        inode_dio_begin(inode);

        NFS_I(inode)->write_io += iov_iter_count(iter);
        while (iov_iter_count(iter)) {
                struct page **pagevec;
                size_t bytes;
                size_t pgbase;
                unsigned npages, i;

                result = iov_iter_get_pages_alloc(iter, &pagevec,
                                                  wsize, &pgbase);
                if (result < 0)
                        break;

                bytes = result;
                iov_iter_advance(iter, bytes);
                npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
                for (i = 0; i < npages; i++) {
                        struct nfs_page *req;
                        unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

                        req = nfs_create_request(dreq->ctx, pagevec[i],
                                                 pgbase, req_len);
                        if (IS_ERR(req)) {
                                result = PTR_ERR(req);
                                break;
                        }

                        if (desc.pg_error < 0) {
                                nfs_free_request(req);
                                result = desc.pg_error;
                                break;
                        }

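                        /* the request stays locked while the WRITE is in flight */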
                        nfs_lock_request(req);
                        req->wb_index = pos >> PAGE_SHIFT;
                        req->wb_offset = pos & ~PAGE_MASK;
                        if (!nfs_pageio_add_request(&desc, req)) {
                                result = desc.pg_error;
                                nfs_unlock_and_release_request(req);
                                break;
                        }
                        pgbase = 0;
                        bytes -= req_len;
                        requested_bytes += req_len;
                        pos += req_len;
                        dreq->bytes_left -= req_len;
                }
                nfs_direct_release_pages(pagevec, npages);
                kvfree(pagevec);
                if (result < 0)
                        break;
        }
        nfs_pageio_complete(&desc);

        /*
         * If no bytes were started, return the error, and let the
         * generic layer handle the completion.
         */
        if (requested_bytes == 0) {
                inode_dio_end(inode);
                nfs_direct_req_release(dreq);
                return result < 0 ? result : -EIO;
        }

        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq);
        return requested_bytes;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers from which to write data
 * @swap: flag indicating this is swap IO, not O_DIRECT IO
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
                              bool swap)
{
        ssize_t result, requested;
        size_t count;
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        struct nfs_direct_req *dreq;
        struct nfs_lock_context *l_ctx;
        loff_t pos, end;

        dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
                file, iov_iter_count(iter), (long long) iocb->ki_pos);

        if (swap)
                /* bypass generic checks */
                result = iov_iter_count(iter);
        else
                result = generic_write_checks(iocb, iter);
        if (result <= 0)
                return result;
        count = result;
        nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

        pos = iocb->ki_pos;
        end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;

        task_io_account_write(count);

        result = -ENOMEM;
        dreq = nfs_direct_req_alloc();
        if (!dreq)
                goto out;

        dreq->inode = inode;
        dreq->bytes_left = dreq->max_count = count;
        dreq->io_start = pos;
        dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
        l_ctx = nfs_get_lock_context(dreq->ctx);
        if (IS_ERR(l_ctx)) {
                result = PTR_ERR(l_ctx);
                nfs_direct_req_release(dreq);
                goto out_release;
        }
        dreq->l_ctx = l_ctx;
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;
        pnfs_init_ds_commit_info_ops(&dreq->ds_cinfo, inode);

        if (swap) {
                requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
                                                            FLUSH_STABLE);
        } else {
                nfs_start_io_direct(inode);

                requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
                                                            FLUSH_COND_STABLE);

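                /*
                 * Drop any cached pages that overlap the range we just
                 * wrote, so cached readers don't see stale data.
                 */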
                if (mapping->nrpages) {
                        invalidate_inode_pages2_range(mapping,
                                                      pos >> PAGE_SHIFT, end);
                }

                nfs_end_io_direct(inode);
        }

        if (requested > 0) {
                result = nfs_direct_wait(dreq);
                if (result > 0) {
                        requested -= result;
                        iocb->ki_pos = pos + result;
                        /* XXX: should check the generic_write_sync retval */
                        generic_write_sync(iocb, result);
                }
                iov_iter_revert(iter, requested);
        } else {
                result = requested;
        }
        nfs_fscache_invalidate(inode, FSCACHE_INVAL_DIO_WRITE);
out_release:
        nfs_direct_req_release(dreq);
out:
        return result;
}

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
        nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
                                                sizeof(struct nfs_direct_req),
                                                0, (SLAB_RECLAIM_ACCOUNT|
                                                        SLAB_MEM_SPREAD),
                                                NULL);
        if (nfs_direct_cachep == NULL)
                return -ENOMEM;

        return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
        kmem_cache_destroy(nfs_direct_cachep);
}