diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index bbd433e..f64ebb6 100644
@@ -58,6 +58,7 @@
 #include <linux/ratelimit.h>
 #include <linux/page-isolation.h>
 #include <linux/pagewalk.h>
+#include <linux/shmem_fs.h>
 #include "internal.h"
 #include "ras/ras_event.h"
 
@@ -763,7 +764,7 @@ static int delete_from_lru_cache(struct page *p)
                 * Poisoned page might never drop its ref count to 0 so we have
                 * to uncharge it manually from its memcg.
                 */
-               mem_cgroup_uncharge(p);
+               mem_cgroup_uncharge(page_folio(p));
 
                /*
                 * drop the page count elevated by isolate_lru_page()
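For reference, a minimal sketch (not part of this diff) of the folio-based uncharge pattern the hunk above switches to: page_folio() resolves a struct page to its containing folio, and the memcg API then takes the folio directly. The helper name uncharge_poisoned_page() is hypothetical.

#include <linux/mm.h>
#include <linux/memcontrol.h>

/* Hypothetical helper, shown only to illustrate the page -> folio step. */
static void uncharge_poisoned_page(struct page *p)
{
	/* page_folio() maps the page to its containing folio ... */
	struct folio *folio = page_folio(p);

	/* ... and mem_cgroup_uncharge() now operates on the folio. */
	mem_cgroup_uncharge(folio);
}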
@@ -867,6 +868,7 @@ static int me_pagecache_clean(struct page_state *ps, struct page *p)
 {
        int ret;
        struct address_space *mapping;
+       bool extra_pins;
 
        delete_from_lru_cache(p);
 
@@ -895,18 +897,24 @@ static int me_pagecache_clean(struct page_state *ps, struct page *p)
                goto out;
        }
 
+       /*
+        * The shmem page is kept in the page cache rather than truncated,
+        * so it is expected to have an extra refcount after error handling.
+        */
+       extra_pins = shmem_mapping(mapping);
+
        /*
         * Truncation is a bit tricky. Enable it per file system for now.
         *
         * Open: to take i_rwsem or not for this? Right now we don't.
         */
        ret = truncate_error_page(p, page_to_pfn(p), mapping);
+       if (has_extra_refcount(ps, p, extra_pins))
+               ret = MF_FAILED;
+
 out:
        unlock_page(p);
 
-       if (has_extra_refcount(ps, p, false))
-               ret = MF_FAILED;
-
        return ret;
 }
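As a rough illustration of what the reordered check relies on: has_extra_refcount() (its body is not shown in this diff) is assumed to compare the page's reference count against what this path legitimately holds, with one extra reference tolerated when the page stays in the page cache. A simplified sketch of that idea, using the hypothetical name page_has_unexpected_refs():

#include <linux/mm.h>

/* Hypothetical sketch, not the kernel's has_extra_refcount(). */
static bool page_has_unexpected_refs(struct page *p, bool extra_pins)
{
	int expected = 1;		/* the reference this path holds */

	if (extra_pins)
		expected++;		/* shmem keeps its page cache reference */

	return page_count(p) > expected;
}

Note that the hunk also moves the check above the out: label, so the early-exit paths that jump to out: no longer run it; only the truncation path is judged against the expected refcount.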
 
@@ -1436,14 +1444,11 @@ static int identify_page_state(unsigned long pfn, struct page *p,
 static int try_to_split_thp_page(struct page *page, const char *msg)
 {
        lock_page(page);
-       if (!PageAnon(page) || unlikely(split_huge_page(page))) {
+       if (unlikely(split_huge_page(page))) {
                unsigned long pfn = page_to_pfn(page);
 
                unlock_page(page);
-               if (!PageAnon(page))
-                       pr_info("%s: %#lx: non anonymous thp\n", msg, pfn);
-               else
-                       pr_info("%s: %#lx: thp split failed\n", msg, pfn);
+               pr_info("%s: %#lx: thp split failed\n", msg, pfn);
                put_page(page);
                return -EBUSY;
        }
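With the !PageAnon() guard dropped, the function no longer refuses file-backed THPs (such as shmem) and simply reports whether the split succeeded. A hedged sketch of how such a static helper is typically called from within memory-failure.c; the wrapper name and message string below are illustrative, not taken from this diff.

#include <linux/mm.h>

/* Illustrative caller: back off with -EBUSY when the split fails. */
static int prepare_page_for_handling(struct page *page)
{
	if (PageTransHuge(page) &&
	    try_to_split_thp_page(page, "example caller"))
		return -EBUSY;	/* page is still compound; caller must bail out */

	return 0;		/* page is now a base page (or never was a THP) */
}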
@@ -2145,14 +2150,14 @@ static int __soft_offline_page(struct page *page)
                        if (!list_empty(&pagelist))
                                putback_movable_pages(&pagelist);
 
-                       pr_info("soft offline: %#lx: %s migration failed %d, type %lx (%pGp)\n",
-                               pfn, msg_page[huge], ret, page->flags, &page->flags);
+                       pr_info("soft offline: %#lx: %s migration failed %d, type %pGp\n",
+                               pfn, msg_page[huge], ret, &page->flags);
                        if (ret > 0)
                                ret = -EBUSY;
                }
        } else {
-               pr_info("soft offline: %#lx: %s isolation failed, page count %d, type %lx (%pGp)\n",
-                       pfn, msg_page[huge], page_count(page), page->flags, &page->flags);
+               pr_info("soft offline: %#lx: %s isolation failed, page count %d, type %pGp\n",
+                       pfn, msg_page[huge], page_count(page), &page->flags);
                ret = -EBUSY;
        }
        return ret;
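The format-string change drops the separate raw %lx dump because the %pGp printk specifier already decodes page->flags symbolically, and in kernels of this vintage it also prints the hex value, which presumably made the duplicate argument redundant. A minimal, self-contained usage sketch (the helper name is hypothetical):

#include <linux/mm.h>
#include <linux/printk.h>

/* Illustrative only: report a page's pfn and decoded flags. */
static void report_page_flags(struct page *page)
{
	pr_info("pfn %#lx: type %pGp\n", page_to_pfn(page), &page->flags);
}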