diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index e5a1531..eefd823 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -66,6 +66,19 @@ int sysctl_memory_failure_recovery __read_mostly = 1;
 
 atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
 
+static bool __page_handle_poison(struct page *page)
+{
+       bool ret;
+
+       zone_pcp_disable(page_zone(page));
+       ret = dissolve_free_huge_page(page);
+       if (!ret)
+               ret = take_page_off_buddy(page);
+       zone_pcp_enable(page_zone(page));
+
+       return ret;
+}
+
 static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release)
 {
        if (hugepage_or_freepage) {
@@ -73,7 +86,7 @@ static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, boo
                 * Doing this check for free pages is also fine since dissolve_free_huge_page
                 * returns 0 for non-hugetlb pages as well.
                 */
-               if (dissolve_free_huge_page(page) || !take_page_off_buddy(page))
+               if (!__page_handle_poison(page))
                        /*
                         * We could fail to take off the target page from buddy
                         * for example due to racy page allocation, but that's
@@ -985,7 +998,7 @@ static int me_huge_page(struct page *p, unsigned long pfn)
                 */
                if (PageAnon(hpage))
                        put_page(hpage);
-               if (!dissolve_free_huge_page(p) && take_page_off_buddy(p)) {
+               if (__page_handle_poison(p)) {
                        page_ref_inc(p);
                        res = MF_RECOVERED;
                }
@@ -1253,10 +1266,10 @@ static int get_hwpoison_page(struct page *p, unsigned long flags)
 static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
                                  int flags, struct page **hpagep)
 {
-       enum ttu_flags ttu = TTU_IGNORE_MLOCK;
+       enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC;
        struct address_space *mapping;
        LIST_HEAD(tokill);
-       bool unmap_success = true;
+       bool unmap_success;
        int kill = 1, forcekill;
        struct page *hpage = *hpagep;
        bool mlocked = PageMlocked(hpage);
@@ -1319,7 +1332,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
                collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
 
        if (!PageHuge(hpage)) {
-               unmap_success = try_to_unmap(hpage, ttu);
+               try_to_unmap(hpage, ttu);
        } else {
                if (!PageAnon(hpage)) {
                        /*
@@ -1327,21 +1340,20 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
                         * could potentially call huge_pmd_unshare.  Because of
                         * this, take semaphore in write mode here and set
                         * TTU_RMAP_LOCKED to indicate we have taken the lock
-                        * at this higer level.
+                        * at this higher level.
                         */
                        mapping = hugetlb_page_mapping_lock_write(hpage);
                        if (mapping) {
-                               unmap_success = try_to_unmap(hpage,
-                                                    ttu|TTU_RMAP_LOCKED);
+                               try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED);
                                i_mmap_unlock_write(mapping);
-                       } else {
+                       } else
                                pr_info("Memory failure: %#lx: could not lock mapping for mapped huge page\n", pfn);
-                               unmap_success = false;
-                       }
                } else {
-                       unmap_success = try_to_unmap(hpage, ttu);
+                       try_to_unmap(hpage, ttu);
                }
        }
+
+       unmap_success = !page_mapped(hpage);
        if (!unmap_success)
                pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
                       pfn, page_mapcount(hpage));
@@ -1446,7 +1458,7 @@ static int memory_failure_hugetlb(unsigned long pfn, int flags)
                        }
                        unlock_page(head);
                        res = MF_FAILED;
-                       if (!dissolve_free_huge_page(p) && take_page_off_buddy(p)) {
+                       if (__page_handle_poison(p)) {
                                page_ref_inc(p);
                                res = MF_RECOVERED;
                        }