diff --git a/mm/filemap.c b/mm/filemap.c
index fe079e9..f0ae9a6 100644
  *  ->i_mutex
  *    ->i_mmap_rwsem           (truncate->unmap_mapping_range)
  *
- *  ->mmap_sem
+ *  ->mmap_lock
  *    ->i_mmap_rwsem
  *      ->page_table_lock or pte_lock  (various, mainly in memory.c)
  *        ->i_pages lock       (arch-dependent flush_dcache_mmap_lock)
  *
- *  ->mmap_sem
+ *  ->mmap_lock
  *    ->lock_page              (access_process_vm)
  *
  *  ->i_mutex                  (generic_perform_write)
- *    ->mmap_sem               (fault_in_pages_readable->do_page_fault)
+ *    ->mmap_lock              (fault_in_pages_readable->do_page_fault)
  *
  *  bdi->wb.list_lock
  *    sb_lock                  (fs/fs-writeback.c)
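The hunk above only renames mmap_sem to mmap_lock in the lock-ordering documentation. As a purely illustrative sketch of the documented nesting (walk_file_vma_sketch is a made-up name, not a kernel symbol), the read side would be taken in this order:

static void walk_file_vma_sketch(struct mm_struct *mm,
				 struct address_space *mapping)
{
	mmap_read_lock(mm);		/* ->mmap_lock is the outer lock */
	i_mmap_lock_read(mapping);	/* ->i_mmap_rwsem nests inside it */

	/* ... walk shared mappings; pte locks nest further inside ... */

	i_mmap_unlock_read(mapping);
	mmap_read_unlock(mm);
}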
@@ -199,9 +199,9 @@ static void unaccount_page_cache_page(struct address_space *mapping,
 
        nr = hpage_nr_pages(page);
 
-       __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
+       __mod_lruvec_page_state(page, NR_FILE_PAGES, -nr);
        if (PageSwapBacked(page)) {
-               __mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);
+               __mod_lruvec_page_state(page, NR_SHMEM, -nr);
                if (PageTransHuge(page))
                        __dec_node_page_state(page, NR_SHMEM_THPS);
        } else if (PageTransHuge(page)) {
@@ -802,21 +802,22 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
        new->mapping = mapping;
        new->index = offset;
 
+       mem_cgroup_migrate(old, new);
+
        xas_lock_irqsave(&xas, flags);
        xas_store(&xas, new);
 
        old->mapping = NULL;
        /* hugetlb pages do not participate in page cache accounting. */
        if (!PageHuge(old))
-               __dec_node_page_state(new, NR_FILE_PAGES);
+               __dec_lruvec_page_state(old, NR_FILE_PAGES);
        if (!PageHuge(new))
-               __inc_node_page_state(new, NR_FILE_PAGES);
+               __inc_lruvec_page_state(new, NR_FILE_PAGES);
        if (PageSwapBacked(old))
-               __dec_node_page_state(new, NR_SHMEM);
+               __dec_lruvec_page_state(old, NR_SHMEM);
        if (PageSwapBacked(new))
-               __inc_node_page_state(new, NR_SHMEM);
+               __inc_lruvec_page_state(new, NR_SHMEM);
        xas_unlock_irqrestore(&xas, flags);
-       mem_cgroup_migrate(old, new);
        if (freepage)
                freepage(old);
        put_page(old);
@@ -832,7 +833,6 @@ static int __add_to_page_cache_locked(struct page *page,
 {
        XA_STATE(xas, &mapping->i_pages, offset);
        int huge = PageHuge(page);
-       struct mem_cgroup *memcg;
        int error;
        void *old;
 
@@ -840,17 +840,16 @@ static int __add_to_page_cache_locked(struct page *page,
        VM_BUG_ON_PAGE(PageSwapBacked(page), page);
        mapping_set_update(&xas, mapping);
 
-       if (!huge) {
-               error = mem_cgroup_try_charge(page, current->mm,
-                                             gfp_mask, &memcg, false);
-               if (error)
-                       return error;
-       }
-
        get_page(page);
        page->mapping = mapping;
        page->index = offset;
 
+       if (!huge) {
+               error = mem_cgroup_charge(page, current->mm, gfp_mask);
+               if (error)
+                       goto error;
+       }
+
        do {
                xas_lock_irq(&xas);
                old = xas_load(&xas);
@@ -869,25 +868,23 @@ static int __add_to_page_cache_locked(struct page *page,
 
                /* hugetlb pages do not participate in page cache accounting */
                if (!huge)
-                       __inc_node_page_state(page, NR_FILE_PAGES);
+                       __inc_lruvec_page_state(page, NR_FILE_PAGES);
 unlock:
                xas_unlock_irq(&xas);
        } while (xas_nomem(&xas, gfp_mask & GFP_RECLAIM_MASK));
 
-       if (xas_error(&xas))
+       if (xas_error(&xas)) {
+               error = xas_error(&xas);
                goto error;
+       }
 
-       if (!huge)
-               mem_cgroup_commit_charge(page, memcg, false, false);
        trace_mm_filemap_add_to_page_cache(page);
        return 0;
 error:
        page->mapping = NULL;
        /* Leave page->index set: truncation relies upon it */
-       if (!huge)
-               mem_cgroup_cancel_charge(page, memcg, false);
        put_page(page);
-       return xas_error(&xas);
+       return error;
 }
 ALLOW_ERROR_INJECTION(__add_to_page_cache_locked, ERRNO);
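The rework above switches __add_to_page_cache_locked() from the old mem_cgroup_try_charge()/mem_cgroup_commit_charge()/mem_cgroup_cancel_charge() triplet to a single mem_cgroup_charge() call. A minimal sketch of the new protocol, assuming kernel context (charge_cache_page_sketch is an illustrative name, not a kernel function):

static int charge_cache_page_sketch(struct page *page, struct mm_struct *mm,
				    gfp_t gfp)
{
	int err;

	/*
	 * Old protocol, for contrast:
	 *   mem_cgroup_try_charge(page, mm, gfp, &memcg, false);
	 *   ... insert the page into the mapping ...
	 *   mem_cgroup_commit_charge(page, memcg, false, false);
	 *   (or mem_cgroup_cancel_charge(page, memcg, false) on failure)
	 */
	err = mem_cgroup_charge(page, mm, gfp);	/* charges and commits */
	if (err)
		return err;

	/*
	 * ... insert the page into the mapping; if that fails, put_page()
	 * is enough, since freeing the page releases the charge ...
	 */
	return 0;
}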
 
@@ -1259,7 +1256,7 @@ EXPORT_SYMBOL_GPL(add_page_wait_queue);
  * instead.
  *
  * The read of PG_waiters has to be after (or concurrently with) PG_locked
- * being cleared, but a memory barrier should be unneccssary since it is
+ * being cleared, but a memory barrier should be unnecessary since it is
  * in the same byte as PG_locked.
  */
 static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
@@ -1374,27 +1371,27 @@ EXPORT_SYMBOL_GPL(__lock_page_killable);
 
 /*
  * Return values:
- * 1 - page is locked; mmap_sem is still held.
+ * 1 - page is locked; mmap_lock is still held.
  * 0 - page is not locked.
- *     mmap_sem has been released (up_read()), unless flags had both
+ *     mmap_lock has been released (mmap_read_unlock()), unless flags had both
  *     FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
- *     which case mmap_sem is still held.
+ *     which case mmap_lock is still held.
  *
  * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1
- * with the page locked and the mmap_sem unperturbed.
+ * with the page locked and the mmap_lock unperturbed.
  */
 int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
                         unsigned int flags)
 {
        if (fault_flag_allow_retry_first(flags)) {
                /*
-                * CAUTION! In this case, mmap_sem is not released
+                * CAUTION! In this case, mmap_lock is not released
                 * even though return 0.
                 */
                if (flags & FAULT_FLAG_RETRY_NOWAIT)
                        return 0;
 
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                if (flags & FAULT_FLAG_KILLABLE)
                        wait_on_page_locked_killable(page);
                else
@@ -1406,7 +1403,7 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 
                        ret = __lock_page_killable(page);
                        if (ret) {
-                               up_read(&mm->mmap_sem);
+                               mmap_read_unlock(mm);
                                return 0;
                        }
                } else
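A sketch of how a caller is expected to honour the return contract documented above (fault_lock_sketch is an illustrative name, not a kernel function): a zero return means mmap_lock may already have been dropped with mmap_read_unlock(), so the fault has to be retried rather than carrying on under the lock.

static vm_fault_t fault_lock_sketch(struct page *page, struct vm_fault *vmf)
{
	if (!trylock_page(page)) {
		if (!__lock_page_or_retry(page, vmf->vma->vm_mm, vmf->flags))
			return VM_FAULT_RETRY;	/* mmap_lock may be gone */
	}

	/* Page is locked and mmap_lock is still held at this point. */
	unlock_page(page);
	return 0;
}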
@@ -1991,7 +1988,7 @@ static void shrink_readahead_size_eio(struct file_ra_state *ra)
  * * total number of bytes copied, including those that were already @written
  * * negative error code if nothing was copied
  */
-static ssize_t generic_file_buffered_read(struct kiocb *iocb,
+ssize_t generic_file_buffered_read(struct kiocb *iocb,
                struct iov_iter *iter, ssize_t written)
 {
        struct file *filp = iocb->ki_filp;
@@ -2243,6 +2240,7 @@ out:
        file_accessed(filp);
        return written ? written : error;
 }
+EXPORT_SYMBOL_GPL(generic_file_buffered_read);
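With generic_file_buffered_read() now exported, a modular filesystem could hand its buffered reads straight to it from ->read_iter(). A hedged sketch, assuming the usual read_iter shape (example_file_read_iter is hypothetical and ignores direct I/O and other special cases):

static ssize_t example_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	if (!iov_iter_count(to))
		return 0;	/* nothing requested */

	/* Let the generic page cache reader do the work. */
	return generic_file_buffered_read(iocb, to, 0);
}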
 
 /**
  * generic_file_read_iter - generic filesystem read routine
@@ -2315,14 +2313,14 @@ EXPORT_SYMBOL(generic_file_read_iter);
 #ifdef CONFIG_MMU
 #define MMAP_LOTSAMISS  (100)
 /*
- * lock_page_maybe_drop_mmap - lock the page, possibly dropping the mmap_sem
+ * lock_page_maybe_drop_mmap - lock the page, possibly dropping the mmap_lock
  * @vmf - the vm_fault for this fault.
  * @page - the page to lock.
  * @fpin - the pointer to the file we may pin (or is already pinned).
  *
- * This works similar to lock_page_or_retry in that it can drop the mmap_sem.
+ * This works similar to lock_page_or_retry in that it can drop the mmap_lock.
  * It differs in that it actually returns the page locked if it returns 1 and 0
- * if it couldn't lock the page.  If we did have to drop the mmap_sem then fpin
+ * if it couldn't lock the page.  If we did have to drop the mmap_lock then fpin
  * will point to the pinned file and needs to be fput()'ed at a later point.
  */
 static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
@@ -2333,7 +2331,7 @@ static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
 
        /*
         * NOTE! This will make us return with VM_FAULT_RETRY, but with
-        * the mmap_sem still held. That's how FAULT_FLAG_RETRY_NOWAIT
+        * the mmap_lock still held. That's how FAULT_FLAG_RETRY_NOWAIT
         * is supposed to work. We have way too many special cases..
         */
        if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
@@ -2343,13 +2341,13 @@ static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
        if (vmf->flags & FAULT_FLAG_KILLABLE) {
                if (__lock_page_killable(page)) {
                        /*
-                        * We didn't have the right flags to drop the mmap_sem,
+                        * We didn't have the right flags to drop the mmap_lock,
                         * but all fault_handlers only check for fatal signals
                         * if we return VM_FAULT_RETRY, so we need to drop the
-                        * mmap_sem here and return 0 if we don't have a fpin.
+                        * mmap_lock here and return 0 if we don't have a fpin.
                         */
                        if (*fpin == NULL)
-                               up_read(&vmf->vma->vm_mm->mmap_sem);
+                               mmap_read_unlock(vmf->vma->vm_mm);
                        return 0;
                }
        } else
@@ -2411,7 +2409,7 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 /*
  * Asynchronous readahead happens when we find the page and PG_readahead,
  * so we want to possibly extend the readahead further.  We return the file that
- * was pinned if we have to drop the mmap_sem in order to do IO.
+ * was pinned if we have to drop the mmap_lock in order to do IO.
  */
 static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
                                            struct page *page)
@@ -2446,12 +2444,12 @@ static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
  * it in the page cache, and handles the special cases reasonably without
  * having a lot of duplicated code.
  *
- * vma->vm_mm->mmap_sem must be held on entry.
+ * vma->vm_mm->mmap_lock must be held on entry.
  *
- * If our return value has VM_FAULT_RETRY set, it's because the mmap_sem
+ * If our return value has VM_FAULT_RETRY set, it's because the mmap_lock
  * may be dropped before doing I/O or by lock_page_maybe_drop_mmap().
  *
- * If our return value does not have VM_FAULT_RETRY set, the mmap_sem
+ * If our return value does not have VM_FAULT_RETRY set, the mmap_lock
  * has not been released.
  *
  * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
@@ -2521,7 +2519,7 @@ retry_find:
                goto page_not_uptodate;
 
        /*
-        * We've made it this far and we had to drop our mmap_sem, now is the
+        * We've made it this far and we had to drop our mmap_lock, now is the
         * time to return to the upper layer and have it re-find the vma and
         * redo the fault.
         */
@@ -2571,7 +2569,7 @@ page_not_uptodate:
 
 out_retry:
        /*
-        * We dropped the mmap_sem, we need to return to the fault handler to
+        * We dropped the mmap_lock, we need to return to the fault handler to
         * re-find the vma and come back and find our hopefully still populated
         * page.
         */
@@ -2635,7 +2633,7 @@ void filemap_map_pages(struct vm_fault *vmf,
                if (vmf->pte)
                        vmf->pte += xas.xa_index - last_pgoff;
                last_pgoff = xas.xa_index;
-               if (alloc_set_pte(vmf, NULL, page))
+               if (alloc_set_pte(vmf, page))
                        goto unlock;
                unlock_page(page);
                goto next;