mm/swap_state.c: simplify total_swapcache_pages() with get_swap_device()
author: Huang Ying <ying.huang@intel.com>
Fri, 12 Jul 2019 03:55:37 +0000 (20:55 -0700)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Fri, 12 Jul 2019 18:05:43 +0000 (11:05 -0700)
total_swapcache_pages() may race with swapper_spaces[] allocation and
freeing.  Previously, this was protected with a swapper_spaces[]-specific
RCU mechanism.  To simplify the logic and reduce code complexity, that
mechanism is replaced with get/put_swap_device().  The number of lines of
code is reduced as well.  Although not so important, swapoff() performance
also improves, because one synchronize_rcu() call during swapoff() is
eliminated.

[ying.huang@intel.com: fix bad swap file entry warning]
Link: http://lkml.kernel.org/r/20190531024102.21723-1-ying.huang@intel.com
Link: http://lkml.kernel.org/r/20190527082714.12151-1-ying.huang@intel.com
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Tested-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Yang Shi <yang.shi@linux.alibaba.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Andrea Parri <andrea.parri@amarulasolutions.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/swap_state.c

index 61453f1..8368621 100644 (file)
@@ -73,23 +73,24 @@ unsigned long total_swapcache_pages(void)
        unsigned int i, j, nr;
        unsigned long ret = 0;
        struct address_space *spaces;
+       struct swap_info_struct *si;
 
-       rcu_read_lock();
        for (i = 0; i < MAX_SWAPFILES; i++) {
-               /*
-                * The corresponding entries in nr_swapper_spaces and
-                * swapper_spaces will be reused only after at least
-                * one grace period.  So it is impossible for them
-                * belongs to different usage.
-                */
-               nr = nr_swapper_spaces[i];
-               spaces = rcu_dereference(swapper_spaces[i]);
-               if (!nr || !spaces)
+               swp_entry_t entry = swp_entry(i, 1);
+
+               /* Avoid get_swap_device() to warn for bad swap entry */
+               if (!swp_swap_info(entry))
+                       continue;
+               /* Prevent swapoff to free swapper_spaces */
+               si = get_swap_device(entry);
+               if (!si)
                        continue;
+               nr = nr_swapper_spaces[i];
+               spaces = swapper_spaces[i];
                for (j = 0; j < nr; j++)
                        ret += spaces[j].nrpages;
+               put_swap_device(si);
        }
-       rcu_read_unlock();
        return ret;
 }
 
@@ -611,20 +612,16 @@ int init_swap_address_space(unsigned int type, unsigned long nr_pages)
                mapping_set_no_writeback_tags(space);
        }
        nr_swapper_spaces[type] = nr;
-       rcu_assign_pointer(swapper_spaces[type], spaces);
+       swapper_spaces[type] = spaces;
 
        return 0;
 }
 
 void exit_swap_address_space(unsigned int type)
 {
-       struct address_space *spaces;
-
-       spaces = swapper_spaces[type];
+       kvfree(swapper_spaces[type]);
        nr_swapper_spaces[type] = 0;
-       rcu_assign_pointer(swapper_spaces[type], NULL);
-       synchronize_rcu();
-       kvfree(spaces);
+       swapper_spaces[type] = NULL;
 }
 
 static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,