diff --git a/mm/swap.c b/mm/swap.c
index bf9a79f..0ac463d 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -35,6 +35,7 @@
 #include <linux/uio.h>
 #include <linux/hugetlb.h>
 #include <linux/page_idle.h>
+#include <linux/local_lock.h>
 
 #include "internal.h"
 
@@ -44,14 +45,32 @@
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
-static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
-static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
-static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
-static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
-static DEFINE_PER_CPU(struct pagevec, lru_lazyfree_pvecs);
+/* Protecting only lru_rotate.pvec which requires disabling interrupts */
+struct lru_rotate {
+       local_lock_t lock;
+       struct pagevec pvec;
+};
+static DEFINE_PER_CPU(struct lru_rotate, lru_rotate) = {
+       .lock = INIT_LOCAL_LOCK(lock),
+};
+
+/*
+ * The following struct pagevec are grouped together because they are protected
+ * by disabling preemption (and interrupts remain enabled).
+ */
+struct lru_pvecs {
+       local_lock_t lock;
+       struct pagevec lru_add;
+       struct pagevec lru_deactivate_file;
+       struct pagevec lru_deactivate;
+       struct pagevec lru_lazyfree;
 #ifdef CONFIG_SMP
-static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
+       struct pagevec activate_page;
 #endif
+};
+static DEFINE_PER_CPU(struct lru_pvecs, lru_pvecs) = {
+       .lock = INIT_LOCAL_LOCK(lock),
+};
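
The two per-CPU structures above make the locking scheme explicit: lru_rotate.pvec gets its own lock because it is also filled from interrupt context, while everything in lru_pvecs only needs lru_pvecs.lock, which disables preemption on non-RT kernels and becomes a per-CPU spinlock on PREEMPT_RT. A minimal sketch of the pattern, using a hypothetical per-CPU counter (foo_pcp and foo_inc() are illustration only, not part of this patch):

    #include <linux/local_lock.h>
    #include <linux/percpu.h>

    /* Hypothetical per-CPU state guarded by a local_lock. */
    struct foo_pcp {
            local_lock_t lock;
            unsigned long count;
    };
    static DEFINE_PER_CPU(struct foo_pcp, foo_pcp) = {
            .lock = INIT_LOCAL_LOCK(lock),
    };

    static void foo_inc(void)
    {
            local_lock(&foo_pcp.lock);      /* !PREEMPT_RT: preempt_disable() plus lockdep */
            this_cpu_inc(foo_pcp.count);    /* this CPU's counter, protected by the lock */
            local_unlock(&foo_pcp.lock);
    }

On PREEMPT_RT the section stays preemptible, since local_lock() then acquires the CPU's lock instead of disabling preemption; naming the protection, rather than relying on a bare preempt_disable(), is what makes that substitution possible.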
 
 /*
  * This path almost never happens for VM activity - pages are normally
@@ -254,11 +273,11 @@ void rotate_reclaimable_page(struct page *page)
                unsigned long flags;
 
                get_page(page);
-               local_irq_save(flags);
-               pvec = this_cpu_ptr(&lru_rotate_pvecs);
+               local_lock_irqsave(&lru_rotate.lock, flags);
+               pvec = this_cpu_ptr(&lru_rotate.pvec);
                if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_move_tail(pvec);
-               local_irq_restore(flags);
+               local_unlock_irqrestore(&lru_rotate.lock, flags);
        }
 }
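
rotate_reclaimable_page() can run in interrupt context (typically from writeback completion), so preemption protection alone is not enough for lru_rotate.pvec; the patch therefore uses the IRQ-disabling lock variants, which behave like the old local_irq_save()/local_irq_restore() on non-RT kernels while still naming the protected data. A sketch of that variant, reusing the hypothetical foo_pcp from the sketch above:

    static void foo_inc_any_context(void)
    {
            unsigned long flags;

            local_lock_irqsave(&foo_pcp.lock, flags);       /* !PREEMPT_RT: local_irq_save() */
            this_cpu_inc(foo_pcp.count);
            local_unlock_irqrestore(&foo_pcp.lock, flags);
    }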
 
@@ -293,7 +312,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
 #ifdef CONFIG_SMP
 static void activate_page_drain(int cpu)
 {
-       struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);
+       struct pagevec *pvec = &per_cpu(lru_pvecs.activate_page, cpu);
 
        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, __activate_page, NULL);
@@ -301,19 +320,21 @@ static void activate_page_drain(int cpu)
 
 static bool need_activate_page_drain(int cpu)
 {
-       return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
+       return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
 }
 
 void activate_page(struct page *page)
 {
        page = compound_head(page);
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-               struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
+               struct pagevec *pvec;
 
+               local_lock(&lru_pvecs.lock);
+               pvec = this_cpu_ptr(&lru_pvecs.activate_page);
                get_page(page);
                if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_lru_move_fn(pvec, __activate_page, NULL);
-               put_cpu_var(activate_page_pvecs);
+               local_unlock(&lru_pvecs.lock);
        }
 }
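
activate_page() shows the conversion applied throughout the file: the get_cpu_var()/put_cpu_var() pair, which protected the per-CPU pagevec only by disabling preemption, becomes an explicit local_lock()/local_unlock() of lru_pvecs.lock plus this_cpu_ptr() to reach the member. Schematically (some_pvec, some_pvecs and member are placeholders, not real symbols):

    /* before: the protection is implicit in get_cpu_var() */
    pvec = &get_cpu_var(some_pvec);
    /* ... add the page, drain if needed ... */
    put_cpu_var(some_pvec);

    /* after: the lock names what is protected and is PREEMPT_RT aware */
    local_lock(&some_pvecs.lock);
    pvec = this_cpu_ptr(&some_pvecs.member);
    /* ... add the page, drain if needed ... */
    local_unlock(&some_pvecs.lock);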
 
@@ -335,9 +356,12 @@ void activate_page(struct page *page)
 
 static void __lru_cache_activate_page(struct page *page)
 {
-       struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+       struct pagevec *pvec;
        int i;
 
+       local_lock(&lru_pvecs.lock);
+       pvec = this_cpu_ptr(&lru_pvecs.lru_add);
+
        /*
         * Search backwards on the optimistic assumption that the page being
         * activated has just been added to this pagevec. Note that only
@@ -357,7 +381,7 @@ static void __lru_cache_activate_page(struct page *page)
                }
        }
 
-       put_cpu_var(lru_add_pvec);
+       local_unlock(&lru_pvecs.lock);
 }
 
 /*
@@ -385,7 +409,7 @@ void mark_page_accessed(struct page *page)
        } else if (!PageActive(page)) {
                /*
                 * If the page is on the LRU, queue it for activation via
-                * activate_page_pvecs. Otherwise, assume the page is on a
+                * lru_pvecs.activate_page. Otherwise, assume the page is on a
                 * pagevec, mark it active and it'll be moved to the active
                 * LRU on the next drain.
                 */
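
The comment change is a rename only; the behaviour it describes is unchanged: a page already on an LRU list is queued for activation through lru_pvecs.activate_page, while a page still sitting in a not-yet-drained lru_add pagevec just gets PG_active set and is moved when that pagevec drains. Simplified from the surrounding mark_page_accessed() logic:

    if (PageLRU(page))
            activate_page(page);                    /* batched via lru_pvecs.activate_page */
    else
            __lru_cache_activate_page(page);        /* still in lru_pvecs.lru_add */
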
@@ -404,12 +428,14 @@ EXPORT_SYMBOL(mark_page_accessed);
 
 static void __lru_cache_add(struct page *page)
 {
-       struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+       struct pagevec *pvec;
 
+       local_lock(&lru_pvecs.lock);
+       pvec = this_cpu_ptr(&lru_pvecs.lru_add);
        get_page(page);
        if (!pagevec_add(pvec, page) || PageCompound(page))
                __pagevec_lru_add(pvec);
-       put_cpu_var(lru_add_pvec);
+       local_unlock(&lru_pvecs.lock);
 }
 
 /**
@@ -593,30 +619,30 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
  */
 void lru_add_drain_cpu(int cpu)
 {
-       struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);
+       struct pagevec *pvec = &per_cpu(lru_pvecs.lru_add, cpu);
 
        if (pagevec_count(pvec))
                __pagevec_lru_add(pvec);
 
-       pvec = &per_cpu(lru_rotate_pvecs, cpu);
+       pvec = &per_cpu(lru_rotate.pvec, cpu);
        if (pagevec_count(pvec)) {
                unsigned long flags;
 
                /* No harm done if a racing interrupt already did this */
-               local_irq_save(flags);
+               local_lock_irqsave(&lru_rotate.lock, flags);
                pagevec_move_tail(pvec);
-               local_irq_restore(flags);
+               local_unlock_irqrestore(&lru_rotate.lock, flags);
        }
 
-       pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
+       pvec = &per_cpu(lru_pvecs.lru_deactivate_file, cpu);
        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
 
-       pvec = &per_cpu(lru_deactivate_pvecs, cpu);
+       pvec = &per_cpu(lru_pvecs.lru_deactivate, cpu);
        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
 
-       pvec = &per_cpu(lru_lazyfree_pvecs, cpu);
+       pvec = &per_cpu(lru_pvecs.lru_lazyfree, cpu);
        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
 
@@ -641,11 +667,14 @@ void deactivate_file_page(struct page *page)
                return;
 
        if (likely(get_page_unless_zero(page))) {
-               struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
+               struct pagevec *pvec;
+
+               local_lock(&lru_pvecs.lock);
+               pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);
 
                if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
-               put_cpu_var(lru_deactivate_file_pvecs);
+               local_unlock(&lru_pvecs.lock);
        }
 }
 
@@ -660,12 +689,14 @@ void deactivate_file_page(struct page *page)
 void deactivate_page(struct page *page)
 {
        if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
-               struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
+               struct pagevec *pvec;
 
+               local_lock(&lru_pvecs.lock);
+               pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate);
                get_page(page);
                if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
-               put_cpu_var(lru_deactivate_pvecs);
+               local_unlock(&lru_pvecs.lock);
        }
 }
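
deactivate_file_page(), deactivate_page() and mark_page_lazyfree() all use the same batching step under lru_pvecs.lock: add the page to the per-CPU pagevec and flush at once when pagevec_add() reports the vector is full or the page is compound, so huge pages do not linger in per-CPU caches. The shared step, written as a hypothetical helper for illustration (the file open-codes it in each function):

    static void pagevec_add_or_flush(struct pagevec *pvec, struct page *page,
                                     void (*move_fn)(struct page *page,
                                                     struct lruvec *lruvec, void *arg))
    {
            /* pagevec_add() returns the space left; 0 means the pagevec is now full */
            if (!pagevec_add(pvec, page) || PageCompound(page))
                    pagevec_lru_move_fn(pvec, move_fn, NULL);
    }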
 
@@ -680,19 +711,30 @@ void mark_page_lazyfree(struct page *page)
 {
        if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
            !PageSwapCache(page) && !PageUnevictable(page)) {
-               struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);
+               struct pagevec *pvec;
 
+               local_lock(&lru_pvecs.lock);
+               pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree);
                get_page(page);
                if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
-               put_cpu_var(lru_lazyfree_pvecs);
+               local_unlock(&lru_pvecs.lock);
        }
 }
 
 void lru_add_drain(void)
 {
-       lru_add_drain_cpu(get_cpu());
-       put_cpu();
+       local_lock(&lru_pvecs.lock);
+       lru_add_drain_cpu(smp_processor_id());
+       local_unlock(&lru_pvecs.lock);
+}
+
+void lru_add_drain_cpu_zone(struct zone *zone)
+{
+       local_lock(&lru_pvecs.lock);
+       lru_add_drain_cpu(smp_processor_id());
+       drain_local_pages(zone);
+       local_unlock(&lru_pvecs.lock);
 }
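
lru_add_drain() keeps its old meaning (drain this CPU's pagevecs), now with lru_pvecs.lock held across lru_add_drain_cpu(). The new lru_add_drain_cpu_zone() additionally drains the zone's per-CPU free lists inside the same locked section, so both drains are guaranteed to hit the same CPU rather than possibly migrating between two separate calls. For a caller that previously did the two steps back to back, the conversion would look roughly like this (hypothetical call site, for illustration only):

    /* before: the task could migrate between the two drains */
    lru_add_drain();
    drain_local_pages(zone);

    /* after: both drains run on one CPU under lru_pvecs.lock */
    lru_add_drain_cpu_zone(zone);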
 
 #ifdef CONFIG_SMP
@@ -743,11 +785,11 @@ void lru_add_drain_all(void)
        for_each_online_cpu(cpu) {
                struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
 
-               if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
-                   pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
-                   pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
-                   pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
-                   pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) ||
+               if (pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
+                   pagevec_count(&per_cpu(lru_rotate.pvec, cpu)) ||
+                   pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
+                   pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
+                   pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
                    need_activate_page_drain(cpu)) {
                        INIT_WORK(work, lru_add_drain_per_cpu);
                        queue_work_on(cpu, mm_percpu_wq, work);