mm: rmap: remove lock_page_memcg()
author		Johannes Weiner <hannes@cmpxchg.org>
Tue, 6 Dec 2022 17:13:40 +0000 (18:13 +0100)
committer	Andrew Morton <akpm@linux-foundation.org>
Thu, 19 Jan 2023 01:12:42 +0000 (17:12 -0800)
The previous patch made sure charge moving only touches pages for which
page_mapped() is stable.  lock_page_memcg() is no longer needed.
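
For readers following the diff, the pattern being dropped is a lock_page_memcg()/unlock_page_memcg() pair bracketing the mapcount update and the lruvec statistics update, which kept the page's memcg binding stable against charge moving for that window.  Below is a condensed sketch of the before/after shape, based on the page_add_file_rmap() hunk in this patch; the helper names are illustrative only, and the compound/THP path and full argument list are elided, so this is not the verbatim kernel function:

/*
 * Illustrative sketch only (hypothetical helpers, simplified from the
 * page_add_file_rmap() hunk below; compound/THP handling omitted).
 */

/* Before: pin the page->memcg binding across the rmap accounting. */
static void add_file_rmap_before(struct page *page)
{
	bool first;

	lock_page_memcg(page);
	first = atomic_inc_and_test(&page->_mapcount);
	if (first)
		__mod_lruvec_page_state(page, NR_FILE_MAPPED, 1);
	unlock_page_memcg(page);
}

/*
 * After: charge moving only moves pages with stable page_mapped(),
 * so the accounting no longer needs the memcg lock.
 */
static void add_file_rmap_after(struct page *page)
{
	bool first;

	first = atomic_inc_and_test(&page->_mapcount);
	if (first)
		__mod_lruvec_page_state(page, NR_FILE_MAPPED, 1);
}

The same simplification applies to page_add_anon_rmap() and page_remove_rmap() in the hunks that follow.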

Link: https://lkml.kernel.org/r/20221206171340.139790-3-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Hugh Dickins <hughd@google.com>
Acked-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/rmap.c

index b616870..32e48b1 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1222,9 +1222,6 @@ void page_add_anon_rmap(struct page *page,
        bool compound = flags & RMAP_COMPOUND;
        bool first = true;
 
-       if (unlikely(PageKsm(page)))
-               lock_page_memcg(page);
-
        /* Is page being mapped by PTE? Is this its first map to be added? */
        if (likely(!compound)) {
                first = atomic_inc_and_test(&page->_mapcount);
@@ -1262,15 +1259,14 @@ void page_add_anon_rmap(struct page *page,
        if (nr)
                __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
 
-       if (unlikely(PageKsm(page)))
-               unlock_page_memcg(page);
-
-       /* address might be in next vma when migration races vma_adjust */
-       else if (first)
-               __page_set_anon_rmap(page, vma, address,
-                                    !!(flags & RMAP_EXCLUSIVE));
-       else
-               __page_check_anon_rmap(page, vma, address);
+       if (likely(!PageKsm(page))) {
+               /* address might be in next vma when migration races vma_adjust */
+               if (first)
+                       __page_set_anon_rmap(page, vma, address,
+                                            !!(flags & RMAP_EXCLUSIVE));
+               else
+                       __page_check_anon_rmap(page, vma, address);
+       }
 
        mlock_vma_page(page, vma, compound);
 }
@@ -1329,7 +1325,6 @@ void page_add_file_rmap(struct page *page,
        bool first;
 
        VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
-       lock_page_memcg(page);
 
        /* Is page being mapped by PTE? Is this its first map to be added? */
        if (likely(!compound)) {
@@ -1365,7 +1360,6 @@ void page_add_file_rmap(struct page *page,
                        NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped);
        if (nr)
                __mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
-       unlock_page_memcg(page);
 
        mlock_vma_page(page, vma, compound);
 }
@@ -1394,8 +1388,6 @@ void page_remove_rmap(struct page *page,
                return;
        }
 
-       lock_page_memcg(page);
-
        /* Is page being unmapped by PTE? Is this its last map to be removed? */
        if (likely(!compound)) {
                last = atomic_add_negative(-1, &page->_mapcount);
@@ -1451,8 +1443,6 @@ void page_remove_rmap(struct page *page,
         * and remember that it's only reliable while mapped.
         */
 
-       unlock_page_memcg(page);
-
        munlock_vma_page(page, vma, compound);
 }