mm/memory: export zap_page_range_single, adjust range for hugetlb PMD sharing, and propagate migrate_to_ram() return value [NOTE(review): the original subject line "selftests: forwarding: Rename bridge_mdb test" does not match this mm/memory.c diff — likely a scraping/join artifact; verify the actual commit subject (the zap changes correspond to the "madvise: use zap_page_range_single for madvise dontneed" series)]
[linux-2.6-microblaze.git] / mm / memory.c
index f88c351..8c84209 100644 (file)
@@ -1341,15 +1341,6 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
        return ret;
 }
 
-/*
- * Parameter block passed down to zap_pte_range in exceptional cases.
- */
-struct zap_details {
-       struct folio *single_folio;     /* Locked folio to be unmapped */
-       bool even_cows;                 /* Zap COWed private pages too? */
-       zap_flags_t zap_flags;          /* Extra flags for zapping */
-};
-
 /* Whether we should zap all COWed (private) pages too */
 static inline bool should_zap_cows(struct zap_details *details)
 {
@@ -1720,7 +1711,7 @@ void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
 {
        struct mmu_notifier_range range;
        struct zap_details details = {
-               .zap_flags = ZAP_FLAG_DROP_MARKER,
+               .zap_flags = ZAP_FLAG_DROP_MARKER | ZAP_FLAG_UNMAP,
                /* Careful - we need to zap private pages too! */
                .even_cows = true,
        };
@@ -1774,19 +1765,27 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
  *
  * The range must fit into one VMA.
  */
-static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
+void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
                unsigned long size, struct zap_details *details)
 {
+       const unsigned long end = address + size;
        struct mmu_notifier_range range;
        struct mmu_gather tlb;
 
        lru_add_drain();
        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
-                               address, address + size);
+                               address, end);
+       if (is_vm_hugetlb_page(vma))
+               adjust_range_if_pmd_sharing_possible(vma, &range.start,
+                                                    &range.end);
        tlb_gather_mmu(&tlb, vma->vm_mm);
        update_hiwater_rss(vma->vm_mm);
        mmu_notifier_invalidate_range_start(&range);
-       unmap_single_vma(&tlb, vma, address, range.end, details);
+       /*
+        * unmap 'address-end' not 'range.start-range.end' as range
+        * could have been expanded for hugetlb pmd sharing.
+        */
+       unmap_single_vma(&tlb, vma, address, end, details);
        mmu_notifier_invalidate_range_end(&range);
        tlb_finish_mmu(&tlb);
 }
@@ -3763,7 +3762,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                         */
                        get_page(vmf->page);
                        pte_unmap_unlock(vmf->pte, vmf->ptl);
-                       vmf->page->pgmap->ops->migrate_to_ram(vmf);
+                       ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
                        put_page(vmf->page);
                } else if (is_hwpoison_entry(entry)) {
                        ret = VM_FAULT_HWPOISON;