Merge tag 'block-5.13-2021-05-14' of git://git.kernel.dk/linux-block
index c94f55e..dfb48cf 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -36,6 +36,7 @@
 #include <linux/hugetlb.h>
 #include <linux/page_idle.h>
 #include <linux/local_lock.h>
+#include <linux/buffer_head.h>
 
 #include "internal.h"
 
@@ -495,7 +496,7 @@ void lru_cache_add_inactive_or_unevictable(struct page *page,
        if (unlikely(unevictable) && !TestSetPageMlocked(page)) {
                int nr_pages = thp_nr_pages(page);
                /*
-                * We use the irq-unsafe __mod_zone_page_stat because this
+                * We use the irq-unsafe __mod_zone_page_state because this
                 * counter is not modified from interrupt context, and the pte
                 * lock is held(spinlock), which implies preemption disabled.
                 */
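
The comment fixed above hinges on the contrast between the irq-safe and irq-unsafe vmstat helpers. As an illustration only (this is a sketch of the generic fallback in mm/vmstat.c, not part of this patch), the irq-safe variant is essentially the unlocked one wrapped in interrupt disabling, which is exactly the overhead the caller here can skip because the pte spinlock already disables preemption and the counter is never touched from interrupt context:

	/*
	 * Sketch of the generic mm/vmstat.c fallback for the irq-safe
	 * helper.  __mod_zone_page_state() omits the local_irq_save()
	 * pair, so it is only valid where preemption is already off
	 * and no interrupt path updates the same counter.
	 */
	void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				 long delta)
	{
		unsigned long flags;

		local_irq_save(flags);
		__mod_zone_page_state(zone, item, delta);
		local_irq_restore(flags);
	}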
@@ -641,6 +642,7 @@ void lru_add_drain_cpu(int cpu)
                pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
 
        activate_page_drain(cpu);
+       invalidate_bh_lrus_cpu(cpu);
 }
 
 /**
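
For context, invalidate_bh_lrus_cpu() is the fs/buffer.c helper this series wires into the drain path so that draining a CPU also evicts its per-CPU buffer_head LRU. A minimal sketch of its shape, assuming the per-CPU bh_lrus array and the existing __invalidate_bh_lrus() helper in fs/buffer.c (simplified, not the verbatim patch):

	/* Sketch, simplified from the fs/buffer.c side of this series. */
	void invalidate_bh_lrus_cpu(int cpu)
	{
		struct bh_lru *b;

		bh_lru_lock();			/* irqs (or preemption) off */
		b = per_cpu_ptr(&bh_lrus, cpu);
		__invalidate_bh_lrus(b);	/* brelse() each cached bh */
		bh_lru_unlock();
	}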
@@ -806,7 +808,7 @@ inline void __lru_add_drain_all(bool force_all_cpus)
         * below which drains the page vectors.
         *
         * Let x, y, and z represent some system CPU numbers, where x < y < z.
-        * Assume CPU #z is is in the middle of the for_each_online_cpu loop
+        * Assume CPU #z is in the middle of the for_each_online_cpu loop
         * below and has already reached CPU #y's per-cpu data. CPU #x comes
         * along, adds some pages to its per-cpu vectors, then calls
         * lru_add_drain_all().
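
The race described in this comment is closed by the lru_drain_gen generation counter earlier in __lru_add_drain_all(): CPU #x's call either snapshots the current generation before the mutex holder bumps it, or it observes a newer generation and knows its pages have already been covered. A condensed, illustrative sketch of that pattern as it appears near the top of this function (details elided):

	/* Condensed sketch of the generation check guarding the drain loop. */
	unsigned int this_gen;

	this_gen = smp_load_acquire(&lru_drain_gen);	/* snapshot */
	mutex_lock(&lock);
	/*
	 * If a drain raced in after our snapshot, it already covered the
	 * pages queued before this call: nothing left to do.
	 */
	if (unlikely(this_gen != lru_drain_gen && !force_all_cpus))
		goto done;
	WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1);	/* open new generation */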
@@ -828,7 +830,8 @@ inline void __lru_add_drain_all(bool force_all_cpus)
                    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
                    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
                    pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
-                   need_activate_page_drain(cpu)) {
+                   need_activate_page_drain(cpu) ||
+                   has_bh_in_lru(cpu, NULL)) {
                        INIT_WORK(work, lru_add_drain_per_cpu);
                        queue_work_on(cpu, mm_percpu_wq, work);
                        __cpumask_set_cpu(cpu, &has_work);
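
Here has_bh_in_lru() joins the pagevec_count() checks as a cheap predicate, so a CPU whose buffer_head LRU is empty is not scheduled for a drain. A sketch of the fs/buffer.c predicate, assuming the per-CPU bh_lrus array; the dummy argument exists to match the on_each_cpu_cond() callback signature:

	/* Sketch of the fs/buffer.c predicate (simplified). */
	bool has_bh_in_lru(int cpu, void *dummy)
	{
		struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
		int i;

		for (i = 0; i < BH_LRU_SIZE; i++) {
			if (b->bhs[i])
				return true;	/* at least one cached bh */
		}
		return false;
	}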