Merge tag 'block-5.9-2020-09-04' of git://git.kernel.dk/linux-block
diff --git a/mm/filemap.c b/mm/filemap.c
index 8e75bce..1aaea26 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -198,7 +198,7 @@ static void unaccount_page_cache_page(struct address_space *mapping,
        if (PageHuge(page))
                return;
 
-       nr = hpage_nr_pages(page);
+       nr = thp_nr_pages(page);
 
        __mod_lruvec_page_state(page, NR_FILE_PAGES, -nr);
        if (PageSwapBacked(page)) {
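
The first hunk is part of the tree-wide rename of hpage_nr_pages() to
thp_nr_pages(): the unaccounting path still has to scale the statistics
adjustment by the number of base pages a (possibly huge) page covers. A
hedged paraphrase of the helper follows; the in-tree definition in
include/linux/huge_mm.h also carries a VM_BUG_ON_PGFLAGS() check and
compiles to a constant 1 without CONFIG_TRANSPARENT_HUGEPAGE:

    /* Number of base pages backing @page: HPAGE_PMD_NR for a THP head, 1 otherwise. */
    static inline int thp_nr_pages(struct page *page)
    {
            if (PageHead(page))
                    return HPAGE_PMD_NR;
            return 1;
    }
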
@@ -2468,6 +2468,7 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
        struct address_space *mapping = file->f_mapping;
        struct file *fpin = NULL;
        pgoff_t offset = vmf->pgoff;
+       unsigned int mmap_miss;
 
        /* If we don't want any read-ahead, don't bother */
        if (vmf->vma->vm_flags & VM_RAND_READ)
@@ -2483,14 +2484,15 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
        }
 
        /* Avoid banging the cache line if not needed */
-       if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
-               ra->mmap_miss++;
+       mmap_miss = READ_ONCE(ra->mmap_miss);
+       if (mmap_miss < MMAP_LOTSAMISS * 10)
+               WRITE_ONCE(ra->mmap_miss, ++mmap_miss);
 
        /*
         * Do we miss much more than hit in this file? If so,
         * stop bothering with read-ahead. It will only hurt.
         */
-       if (ra->mmap_miss > MMAP_LOTSAMISS)
+       if (mmap_miss > MMAP_LOTSAMISS)
                return fpin;
 
        /*
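
The mmap_miss hunks convert a plain shared counter into the marked-access
pattern: struct file_ra_state can be updated from concurrent page faults
on the same file without a lock, so the field is loaded once with
READ_ONCE(), the decision is made on that local snapshot, and the new
value is published with WRITE_ONCE(). That keeps the compiler from
tearing or refetching the access and documents the benign race for
KCSAN. A stand-alone sketch of the increment side, assuming the kernel's
READ_ONCE()/WRITE_ONCE() from <asm-generic/rwonce.h> and illustrative
names not taken from the source:

    /*
     * Saturating miss counter, updated locklessly from many faulting
     * threads. A racing increment can be lost, which merely delays the
     * readahead throttle; correctness does not depend on exact counts.
     */
    static void ra_note_miss(unsigned int *mmap_miss, unsigned int cap)
    {
            unsigned int miss = READ_ONCE(*mmap_miss);   /* one shared load */

            if (miss < cap)
                    WRITE_ONCE(*mmap_miss, miss + 1);    /* one shared store */
    }

Note that the later `if (mmap_miss > MMAP_LOTSAMISS)` test reuses the
same snapshot, so within one fault the throttle decision and the stored
value cannot disagree.
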
@@ -2516,13 +2518,15 @@ static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
        struct file_ra_state *ra = &file->f_ra;
        struct address_space *mapping = file->f_mapping;
        struct file *fpin = NULL;
+       unsigned int mmap_miss;
        pgoff_t offset = vmf->pgoff;
 
        /* If we don't want any read-ahead, don't bother */
        if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
                return fpin;
-       if (ra->mmap_miss > 0)
-               ra->mmap_miss--;
+       mmap_miss = READ_ONCE(ra->mmap_miss);
+       if (mmap_miss)
+               WRITE_ONCE(ra->mmap_miss, --mmap_miss);
        if (PageReadahead(page)) {
                fpin = maybe_unlock_mmap_for_io(vmf, fpin);
                page_cache_async_readahead(mapping, ra, file,
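
The async side applies the mirror-image decrement. Testing the snapshot
(`if (mmap_miss)`) rather than the shared word also guarantees this path
can never take the unsigned counter below zero and wrap it, even when
stores from other CPUs land in between. A hedged sketch under the same
assumptions as above:

    /* Decay one miss per page-cache hit; never wraps below zero. */
    static void ra_note_hit(unsigned int *mmap_miss)
    {
            unsigned int miss = READ_ONCE(*mmap_miss);

            if (miss)
                    WRITE_ONCE(*mmap_miss, miss - 1);
    }
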
@@ -2688,6 +2692,7 @@ void filemap_map_pages(struct vm_fault *vmf,
        unsigned long max_idx;
        XA_STATE(xas, &mapping->i_pages, start_pgoff);
        struct page *page;
+       unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
 
        rcu_read_lock();
        xas_for_each(&xas, page, end_pgoff) {
@@ -2724,8 +2729,8 @@ void filemap_map_pages(struct vm_fault *vmf,
                if (page->index >= max_idx)
                        goto unlock;
 
-               if (file->f_ra.mmap_miss > 0)
-                       file->f_ra.mmap_miss--;
+               if (mmap_miss > 0)
+                       mmap_miss--;
 
                vmf->address += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
                if (vmf->pte)
@@ -2745,6 +2750,7 @@ next:
                        break;
        }
        rcu_read_unlock();
+       WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
 }
 EXPORT_SYMBOL(filemap_map_pages);
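
filemap_map_pages() goes one step further and batches the updates: the
counter is snapshotted once before the RCU-protected walk, decremented
on a private copy for every page that gets mapped, and published with a
single WRITE_ONCE() after rcu_read_unlock(). That avoids banging the
f_ra cache line once per page, in addition to avoiding per-iteration
marked accesses. A compressed sketch of that shape, with the validation
and mapping work elided and names matching the hunks above:

    unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);  /* snapshot once */

    rcu_read_lock();
    xas_for_each(&xas, page, end_pgoff) {
            if (mmap_miss > 0)
                    mmap_miss--;        /* private copy: no shared traffic */
            /* ... validate and map the page ... */
    }
    rcu_read_unlock();
    WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);                /* publish once */
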