diff --git a/mm/rmap.c b/mm/rmap.c
index 6cce9ef..9425260 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -672,7 +672,7 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
  */
 void flush_tlb_batched_pending(struct mm_struct *mm)
 {
-       if (mm->tlb_flush_batched) {
+       if (data_race(mm->tlb_flush_batched)) {
                flush_tlb_mm(mm);
 
                /*
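
Note on the hunk above: the load of mm->tlb_flush_batched is deliberately done
without synchronization, and wrapping it in data_race() documents that the race
is tolerated by design so KCSAN does not report it. A minimal sketch of the
same pattern; the two helper names here are hypothetical, while data_race(),
WRITE_ONCE() and flush_tlb_mm() are the real kernel symbols:

	/* Writer: publish the pending-flush hint; no lock is taken. */
	static void note_deferred_flush(struct mm_struct *mm)
	{
		WRITE_ONCE(mm->tlb_flush_batched, true);
	}

	/* Reader: the racy load is intentional, so annotate it for KCSAN. */
	static void flush_if_batched(struct mm_struct *mm)
	{
		if (data_race(mm->tlb_flush_batched))
			flush_tlb_mm(mm);
	}
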
@@ -1130,7 +1130,7 @@ void do_page_add_anon_rmap(struct page *page,
        }
 
        if (first) {
-               int nr = compound ? hpage_nr_pages(page) : 1;
+               int nr = compound ? thp_nr_pages(page) : 1;
                /*
                 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
                 * these counters are not modified in interrupt context, and
@@ -1169,7 +1169,7 @@ void do_page_add_anon_rmap(struct page *page,
 void page_add_new_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address, bool compound)
 {
-       int nr = compound ? hpage_nr_pages(page) : 1;
+       int nr = compound ? thp_nr_pages(page) : 1;
 
        VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
        __SetPageSwapBacked(page);
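
The two hunks above are part of the tree-wide rename of hpage_nr_pages() to
thp_nr_pages(); behavior is unchanged. The helper returns how many base pages
back the given page, so the anon rmap counters are bumped once per base page
of a THP. An illustrative restatement of its semantics (the real definition
lives in include/linux/huge_mm.h; this sketch assumes PMD-sized THPs only):

	static inline int thp_nr_pages_sketch(struct page *page)
	{
		/* Head page of a compound THP: count every base page. */
		if (PageHead(page))
			return HPAGE_PMD_NR;
		/* Ordinary page: exactly one base page. */
		return 1;
	}
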
@@ -1511,9 +1511,14 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                         */
                        entry = make_migration_entry(page, 0);
                        swp_pte = swp_entry_to_pte(entry);
-                       if (pte_soft_dirty(pteval))
+
+                       /*
+                        * pteval maps a zone device page and is therefore
+                        * a swap pte.
+                        */
+                       if (pte_swp_soft_dirty(pteval))
                                swp_pte = pte_swp_mksoft_dirty(swp_pte);
-                       if (pte_uffd_wp(pteval))
+                       if (pte_swp_uffd_wp(pteval))
                                swp_pte = pte_swp_mkuffd_wp(swp_pte);
                        set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
                        /*
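
This hunk is the one functional fix in the series: in this branch of
try_to_unmap_one(), pteval is already a swap pte (it maps a zone device page),
so the non-swap pte_soft_dirty()/pte_uffd_wp() tests were inspecting the wrong
bits. Switching to the swap-pte accessors lets the soft-dirty and userfaultfd
write-protect state survive the conversion to a migration entry. Condensed
into a hypothetical helper (the name and signature are illustrative; the
accessor calls are the real kernel API):

	static pte_t migration_pte_from_swp(struct page *page, pte_t swp_pteval)
	{
		swp_entry_t entry = make_migration_entry(page, 0);
		pte_t swp_pte = swp_entry_to_pte(entry);

		/* swp_pteval is a swap pte: use the _swp_ flavoured tests. */
		if (pte_swp_soft_dirty(swp_pteval))
			swp_pte = pte_swp_mksoft_dirty(swp_pte);
		if (pte_swp_uffd_wp(swp_pteval))
			swp_pte = pte_swp_mkuffd_wp(swp_pte);
		return swp_pte;
	}
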
@@ -1860,7 +1865,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
                return;
 
        pgoff_start = page_to_pgoff(page);
-       pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
+       pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
        anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
                        pgoff_start, pgoff_end) {
                struct vm_area_struct *vma = avc->vma;
@@ -1913,7 +1918,7 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
                return;
 
        pgoff_start = page_to_pgoff(page);
-       pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
+       pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
        if (!locked)
                i_mmap_lock_read(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap,
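
The same rename applies in both rmap walkers above: a compound page occupies
thp_nr_pages(page) consecutive offsets in its mapping, so the interval-tree
lookup must cover that whole inclusive span rather than pgoff_start alone.
A sketch of the span computation (the helper name is hypothetical):

	static void page_pgoff_span(struct page *page,
				    pgoff_t *start, pgoff_t *end)
	{
		*start = page_to_pgoff(page);
		/* Inclusive end: last offset covered by a compound page. */
		*end = *start + thp_nr_pages(page) - 1;
	}
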