mm/vmalloc: update the header about KVA rework
[linux-2.6-microblaze.git] / mm / filemap.c
index 991503b..f2bb5ff 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/delayacct.h>
 #include <linux/psi.h>
 #include <linux/ramfs.h>
+#include <linux/page_idle.h>
 #include "internal.h"
 
 #define CREATE_TRACE_POINTS
@@ -987,19 +988,6 @@ void __init pagecache_init(void)
        page_writeback_init();
 }
 
-/* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */
-struct wait_page_key {
-       struct page *page;
-       int bit_nr;
-       int page_match;
-};
-
-struct wait_page_queue {
-       struct page *page;
-       int bit_nr;
-       wait_queue_entry_t wait;
-};
-
 static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
 {
        int ret;
@@ -1007,11 +995,7 @@ static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync,
        struct wait_page_queue *wait_page
                = container_of(wait, struct wait_page_queue, wait);
 
-       if (wait_page->page != key->page)
-              return 0;
-       key->page_match = 1;
-
-       if (wait_page->bit_nr != key->bit_nr)
+       if (!wake_page_match(wait_page, key))
                return 0;
 
        /*
@@ -1240,6 +1224,44 @@ int wait_on_page_bit_killable(struct page *page, int bit_nr)
 }
 EXPORT_SYMBOL(wait_on_page_bit_killable);
 
+static int __wait_on_page_locked_async(struct page *page,
+                                      struct wait_page_queue *wait, bool set)
+{
+       struct wait_queue_head *q = page_waitqueue(page);
+       int ret = 0;
+
+       wait->page = page;
+       wait->bit_nr = PG_locked;
+
+       spin_lock_irq(&q->lock);
+       __add_wait_queue_entry_tail(q, &wait->wait);
+       SetPageWaiters(page);
+       if (set)
+               ret = !trylock_page(page);
+       else
+               ret = PageLocked(page);
+       /*
+        * If we were successful now, we know we're still on the
+        * waitqueue as we're still under the lock. This means it's
+        * safe to remove and return success, we know the callback
+        * isn't going to trigger.
+        */
+       if (!ret)
+               __remove_wait_queue(q, &wait->wait);
+       else
+               ret = -EIOCBQUEUED;
+       spin_unlock_irq(&q->lock);
+       return ret;
+}
+
+static int wait_on_page_locked_async(struct page *page,
+                                    struct wait_page_queue *wait)
+{
+       if (!PageLocked(page))
+               return 0;
+       return __wait_on_page_locked_async(compound_head(page), wait, false);
+}
+
 /**
  * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked
  * @page: The page to wait for.
@@ -1402,6 +1424,11 @@ int __lock_page_killable(struct page *__page)
 }
 EXPORT_SYMBOL_GPL(__lock_page_killable);
 
+int __lock_page_async(struct page *page, struct wait_page_queue *wait)
+{
+       return __wait_on_page_locked_async(page, wait, true);
+}
+
 /*
  * Return values:
  * 1 - page is locked; mmap_lock is still held.
@@ -1622,6 +1649,9 @@ EXPORT_SYMBOL(find_lock_entry);
  * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
  *   page is already in cache.  If the page was allocated, unlock it before
  *   returning so the caller can do the same dance.
+ * * %FGP_WRITE - The page will be written
+ * * %FGP_NOFS - __GFP_FS will get cleared in gfp mask
+ * * %FGP_NOWAIT - Don't get blocked by page lock
  *
  * If %FGP_LOCK or %FGP_CREAT are specified then the function may sleep even
  * if the %GFP flags specified for %FGP_CREAT are atomic.
@@ -1663,6 +1693,11 @@ repeat:
 
        if (fgp_flags & FGP_ACCESSED)
                mark_page_accessed(page);
+       else if (fgp_flags & FGP_WRITE) {
+               /* Clear idle flag for buffer write */
+               if (page_is_idle(page))
+                       clear_page_idle(page);
+       }
 
 no_page:
        if (!page && (fgp_flags & FGP_CREAT)) {
@@ -2061,7 +2096,7 @@ find_page:
 
                page = find_get_page(mapping, index);
                if (!page) {
-                       if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO))
+                       if (iocb->ki_flags & IOCB_NOIO)
                                goto would_block;
                        page_cache_sync_readahead(mapping,
                                        ra, filp,
@@ -2080,17 +2115,25 @@ find_page:
                                        index, last_index - index);
                }
                if (!PageUptodate(page)) {
-                       if (iocb->ki_flags & IOCB_NOWAIT) {
-                               put_page(page);
-                               goto would_block;
-                       }
-
                        /*
                         * See comment in do_read_cache_page on why
                         * wait_on_page_locked is used to avoid unnecessarily
                         * serialisations and why it's safe.
                         */
-                       error = wait_on_page_locked_killable(page);
+                       if (iocb->ki_flags & IOCB_WAITQ) {
+                               if (written) {
+                                       put_page(page);
+                                       goto out;
+                               }
+                               error = wait_on_page_locked_async(page,
+                                                               iocb->ki_waitq);
+                       } else {
+                               if (iocb->ki_flags & IOCB_NOWAIT) {
+                                       put_page(page);
+                                       goto would_block;
+                               }
+                               error = wait_on_page_locked_killable(page);
+                       }
                        if (unlikely(error))
                                goto readpage_error;
                        if (PageUptodate(page))
@@ -2178,7 +2221,10 @@ page_ok:
 
 page_not_up_to_date:
                /* Get exclusive access to the page ... */
-               error = lock_page_killable(page);
+               if (iocb->ki_flags & IOCB_WAITQ)
+                       error = lock_page_async(page, iocb->ki_waitq);
+               else
+                       error = lock_page_killable(page);
                if (unlikely(error))
                        goto readpage_error;
 
@@ -2197,7 +2243,7 @@ page_not_up_to_date_locked:
                }
 
 readpage:
-               if (iocb->ki_flags & IOCB_NOIO) {
+               if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT)) {
                        unlock_page(page);
                        put_page(page);
                        goto would_block;