mm: remove irqsave/restore locking from contexts with irqs enabled
author    Johannes Weiner <hannes@cmpxchg.org>
Thu, 2 Sep 2021 21:53:18 +0000 (14:53 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 3 Sep 2021 16:58:10 +0000 (09:58 -0700)
The page cache deletion paths all have interrupts enabled, so there is no
need to use the irqsave/irqrestore locking variants.

They used to have irqs disabled by the memcg lock added in commit
c4843a7593a9 ("memcg: add per cgroup dirty page accounting"), but that has
since been replaced by memcg taking the page lock instead; see commit
0a31bc97c80c ("mm: memcontrol: rewrite uncharge API").
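
For reference, the xarray lock helpers involved differ only in how they
handle the interrupt state: the _irq variants unconditionally disable
interrupts on lock and re-enable them on unlock, which is correct only when
the caller is known to run with interrupts enabled, while the
_irqsave/_irqrestore variants save and later restore whatever state the
caller had.  A sketch of the definitions as they appear in
include/linux/xarray.h (exact layout may differ between kernel versions):

    /* Caller must run with interrupts enabled; unlock re-enables them. */
    #define xa_lock_irq(xa)         spin_lock_irq(&(xa)->xa_lock)
    #define xa_unlock_irq(xa)       spin_unlock_irq(&(xa)->xa_lock)

    /* Caller's interrupt state is unknown; it is saved and restored. */
    #define xa_lock_irqsave(xa, flags) \
                                    spin_lock_irqsave(&(xa)->xa_lock, flags)
    #define xa_unlock_irqrestore(xa, flags) \
                                    spin_unlock_irqrestore(&(xa)->xa_lock, flags)

The xas_lock_irq()/xas_lock_irqsave() helpers used with XA_STATE wrap the
same per-xarray spinlock, so the same reasoning applies to the
replace_page_cache_page() hunk below.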

Link: https://lkml.kernel.org/r/20210614211904.14420-1-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/filemap.c
mm/truncate.c
mm/vmscan.c

diff --git a/mm/filemap.c b/mm/filemap.c
index d1458ec..4926f16 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -258,12 +258,11 @@ static void page_cache_free_page(struct address_space *mapping,
 void delete_from_page_cache(struct page *page)
 {
        struct address_space *mapping = page_mapping(page);
-       unsigned long flags;
 
        BUG_ON(!PageLocked(page));
-       xa_lock_irqsave(&mapping->i_pages, flags);
+       xa_lock_irq(&mapping->i_pages);
        __delete_from_page_cache(page, NULL);
-       xa_unlock_irqrestore(&mapping->i_pages, flags);
+       xa_unlock_irq(&mapping->i_pages);
 
        page_cache_free_page(mapping, page);
 }
@@ -335,19 +334,18 @@ void delete_from_page_cache_batch(struct address_space *mapping,
                                  struct pagevec *pvec)
 {
        int i;
-       unsigned long flags;
 
        if (!pagevec_count(pvec))
                return;
 
-       xa_lock_irqsave(&mapping->i_pages, flags);
+       xa_lock_irq(&mapping->i_pages);
        for (i = 0; i < pagevec_count(pvec); i++) {
                trace_mm_filemap_delete_from_page_cache(pvec->pages[i]);
 
                unaccount_page_cache_page(mapping, pvec->pages[i]);
        }
        page_cache_delete_batch(mapping, pvec);
-       xa_unlock_irqrestore(&mapping->i_pages, flags);
+       xa_unlock_irq(&mapping->i_pages);
 
        for (i = 0; i < pagevec_count(pvec); i++)
                page_cache_free_page(mapping, pvec->pages[i]);
@@ -821,7 +819,6 @@ void replace_page_cache_page(struct page *old, struct page *new)
        void (*freepage)(struct page *) = mapping->a_ops->freepage;
        pgoff_t offset = old->index;
        XA_STATE(xas, &mapping->i_pages, offset);
-       unsigned long flags;
 
        VM_BUG_ON_PAGE(!PageLocked(old), old);
        VM_BUG_ON_PAGE(!PageLocked(new), new);
@@ -833,7 +830,7 @@ void replace_page_cache_page(struct page *old, struct page *new)
 
        mem_cgroup_migrate(old, new);
 
-       xas_lock_irqsave(&xas, flags);
+       xas_lock_irq(&xas);
        xas_store(&xas, new);
 
        old->mapping = NULL;
@@ -846,7 +843,7 @@ void replace_page_cache_page(struct page *old, struct page *new)
                __dec_lruvec_page_state(old, NR_SHMEM);
        if (PageSwapBacked(new))
                __inc_lruvec_page_state(new, NR_SHMEM);
-       xas_unlock_irqrestore(&xas, flags);
+       xas_unlock_irq(&xas);
        if (freepage)
                freepage(old);
        put_page(old);
diff --git a/mm/truncate.c b/mm/truncate.c
index 234ddd8..2adff8f 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -560,21 +560,19 @@ void invalidate_mapping_pagevec(struct address_space *mapping,
 static int
 invalidate_complete_page2(struct address_space *mapping, struct page *page)
 {
-       unsigned long flags;
-
        if (page->mapping != mapping)
                return 0;
 
        if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
                return 0;
 
-       xa_lock_irqsave(&mapping->i_pages, flags);
+       xa_lock_irq(&mapping->i_pages);
        if (PageDirty(page))
                goto failed;
 
        BUG_ON(page_has_private(page));
        __delete_from_page_cache(page, NULL);
-       xa_unlock_irqrestore(&mapping->i_pages, flags);
+       xa_unlock_irq(&mapping->i_pages);
 
        if (mapping->a_ops->freepage)
                mapping->a_ops->freepage(page);
@@ -582,7 +580,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
        put_page(page); /* pagecache ref */
        return 1;
 failed:
-       xa_unlock_irqrestore(&mapping->i_pages, flags);
+       xa_unlock_irq(&mapping->i_pages);
        return 0;
 }
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 17c4b3f..268ad65 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1052,14 +1052,13 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
 static int __remove_mapping(struct address_space *mapping, struct page *page,
                            bool reclaimed, struct mem_cgroup *target_memcg)
 {
-       unsigned long flags;
        int refcount;
        void *shadow = NULL;
 
        BUG_ON(!PageLocked(page));
        BUG_ON(mapping != page_mapping(page));
 
-       xa_lock_irqsave(&mapping->i_pages, flags);
+       xa_lock_irq(&mapping->i_pages);
        /*
         * The non racy check for a busy page.
         *
@@ -1100,7 +1099,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
                if (reclaimed && !mapping_exiting(mapping))
                        shadow = workingset_eviction(page, target_memcg);
                __delete_from_swap_cache(page, swap, shadow);
-               xa_unlock_irqrestore(&mapping->i_pages, flags);
+               xa_unlock_irq(&mapping->i_pages);
                put_swap_page(page, swap);
        } else {
                void (*freepage)(struct page *);
@@ -1126,7 +1125,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
                    !mapping_exiting(mapping) && !dax_mapping(mapping))
                        shadow = workingset_eviction(page, target_memcg);
                __delete_from_page_cache(page, shadow);
-               xa_unlock_irqrestore(&mapping->i_pages, flags);
+               xa_unlock_irq(&mapping->i_pages);
 
                if (freepage != NULL)
                        freepage(page);
@@ -1135,7 +1134,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
        return 1;
 
 cannot_free:
-       xa_unlock_irqrestore(&mapping->i_pages, flags);
+       xa_unlock_irq(&mapping->i_pages);
        return 0;
 }