diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 55c2776..d0d466a 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1554,8 +1554,8 @@ static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
  * If we're over `background_thresh' then the writeback threads are woken to
  * perform some writeout.
  */
-static void balance_dirty_pages(struct bdi_writeback *wb,
-                               unsigned long pages_dirtied)
+static int balance_dirty_pages(struct bdi_writeback *wb,
+                              unsigned long pages_dirtied, unsigned int flags)
 {
        struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
        struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
@@ -1575,6 +1575,7 @@ static void balance_dirty_pages(struct bdi_writeback *wb,
        struct backing_dev_info *bdi = wb->bdi;
        bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
        unsigned long start_time = jiffies;
+       int ret = 0;
 
        for (;;) {
                unsigned long now = jiffies;
@@ -1627,6 +1628,19 @@ static void balance_dirty_pages(struct bdi_writeback *wb,
                        }
                }
 
+               /*
+                * In laptop mode, we wait until hitting the higher threshold
+                * before starting background writeout, and then write out all
+                * the way down to the lower threshold.  So slow writers cause
+                * minimal disk activity.
+                *
+                * In normal mode, we start background writeout at the lower
+                * background_thresh, to keep the amount of dirty memory low.
+                */
+               if (!laptop_mode && nr_reclaimable > gdtc->bg_thresh &&
+                   !writeback_in_progress(wb))
+                       wb_start_background_writeback(wb);
+
                /*
                 * Throttle it only when the background writeback cannot
                 * catch-up. This avoids (excessively) small writeouts
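(The hunk above moves the background-writeback kick from the tail of the function, removed in a later hunk, to the top of the throttling loop. Purely for illustration, the new condition can be restated as a standalone predicate; the helper name below is hypothetical and not part of this patch.)

	/*
	 * Hypothetical restatement of the check added above: in normal mode,
	 * wake the flusher once reclaimable pages exceed the background
	 * threshold and no writeback is already in flight; in laptop mode,
	 * leave it to the hard-throttling path further down.
	 */
	static bool example_should_kick_background(bool laptop,
						   unsigned long nr_reclaimable,
						   unsigned long bg_thresh,
						   bool wb_in_progress)
	{
		return !laptop && nr_reclaimable > bg_thresh && !wb_in_progress;
	}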
@@ -1657,6 +1671,7 @@ free_running:
                        break;
                }
 
+               /* Start writeback even when in laptop mode */
                if (unlikely(!writeback_in_progress(wb)))
                        wb_start_background_writeback(wb);
 
@@ -1715,8 +1730,8 @@ free_running:
                                sdtc = mdtc;
                }
 
-               if (dirty_exceeded && !wb->dirty_exceeded)
-                       wb->dirty_exceeded = 1;
+               if (dirty_exceeded != wb->dirty_exceeded)
+                       wb->dirty_exceeded = dirty_exceeded;
 
                if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) +
                                           BANDWIDTH_INTERVAL))
@@ -1789,6 +1804,10 @@ pause:
                                          period,
                                          pause,
                                          start_time);
+               if (flags & BDP_ASYNC) {
+                       ret = -EAGAIN;
+                       break;
+               }
                __set_current_state(TASK_KILLABLE);
                wb->dirty_sleep = now;
                io_schedule_timeout(pause);
@@ -1820,26 +1839,7 @@ pause:
                if (fatal_signal_pending(current))
                        break;
        }
-
-       if (!dirty_exceeded && wb->dirty_exceeded)
-               wb->dirty_exceeded = 0;
-
-       if (writeback_in_progress(wb))
-               return;
-
-       /*
-        * In laptop mode, we wait until hitting the higher threshold before
-        * starting background writeout, and then write out all the way down
-        * to the lower threshold.  So slow writers cause minimal disk activity.
-        *
-        * In normal mode, we start background writeout at the lower
-        * background_thresh, to keep the amount of dirty memory low.
-        */
-       if (laptop_mode)
-               return;
-
-       if (nr_reclaimable > gdtc->bg_thresh)
-               wb_start_background_writeback(wb);
+       return ret;
 }
 
 static DEFINE_PER_CPU(int, bdp_ratelimits);
@@ -1861,27 +1861,34 @@ static DEFINE_PER_CPU(int, bdp_ratelimits);
 DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
 
 /**
- * balance_dirty_pages_ratelimited - balance dirty memory state
- * @mapping: address_space which was dirtied
+ * balance_dirty_pages_ratelimited_flags - Balance dirty memory state.
+ * @mapping: address_space which was dirtied.
+ * @flags: BDP flags.
  *
  * Processes which are dirtying memory should call in here once for each page
  * which was newly dirtied.  The function will periodically check the system's
  * dirty state and will initiate writeback if needed.
  *
- * Once we're over the dirty memory limit we decrease the ratelimiting
- * by a lot, to prevent individual processes from overshooting the limit
- * by (ratelimit_pages) each.
+ * See balance_dirty_pages_ratelimited() for details.
+ *
+ * Return: If @flags contains BDP_ASYNC, it may return -EAGAIN to
+ * indicate that memory is out of balance and the caller must wait
+ * for I/O to complete.  Otherwise, it will return 0 to indicate
+ * that either memory was already in balance, or it was able to sleep
+ * until the amount of dirty memory returned to balance.
  */
-void balance_dirty_pages_ratelimited(struct address_space *mapping)
+int balance_dirty_pages_ratelimited_flags(struct address_space *mapping,
+                                       unsigned int flags)
 {
        struct inode *inode = mapping->host;
        struct backing_dev_info *bdi = inode_to_bdi(inode);
        struct bdi_writeback *wb = NULL;
        int ratelimit;
+       int ret = 0;
        int *p;
 
        if (!(bdi->capabilities & BDI_CAP_WRITEBACK))
-               return;
+               return ret;
 
        if (inode_cgwb_enabled(inode))
                wb = wb_get_create_current(bdi, GFP_KERNEL);
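(The kerneldoc above spells out the new return contract: only callers passing BDP_ASYNC can see -EAGAIN. As a sketch of the intended usage, a write path that supports nowait buffered I/O would pass BDP_ASYNC and propagate -EAGAIN instead of sleeping. The helper name and surrounding code below are hypothetical, and BDP_ASYNC is assumed to be the flag introduced alongside this change.)

	/*
	 * Hypothetical caller: request non-blocking throttling when the iocb
	 * asked for nowait semantics, and let -EAGAIN bubble up so the write
	 * can be retried from a context that is allowed to sleep.
	 */
	static int example_balance_after_copy(struct kiocb *iocb,
					      struct address_space *mapping)
	{
		unsigned int bdp_flags = (iocb->ki_flags & IOCB_NOWAIT) ?
					 BDP_ASYNC : 0;

		/* Returns -EAGAIN only when BDP_ASYNC was passed. */
		return balance_dirty_pages_ratelimited_flags(mapping, bdp_flags);
	}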
@@ -1921,9 +1928,27 @@ void balance_dirty_pages_ratelimited(struct address_space *mapping)
        preempt_enable();
 
        if (unlikely(current->nr_dirtied >= ratelimit))
-               balance_dirty_pages(wb, current->nr_dirtied);
+               ret = balance_dirty_pages(wb, current->nr_dirtied, flags);
 
        wb_put(wb);
+       return ret;
+}
+
+/**
+ * balance_dirty_pages_ratelimited - balance dirty memory state.
+ * @mapping: address_space which was dirtied.
+ *
+ * Processes which are dirtying memory should call in here once for each page
+ * which was newly dirtied.  The function will periodically check the system's
+ * dirty state and will initiate writeback if needed.
+ *
+ * Once we're over the dirty memory limit we decrease the ratelimiting
+ * by a lot, to prevent individual processes from overshooting the limit
+ * by (ratelimit_pages) each.
+ */
+void balance_dirty_pages_ratelimited(struct address_space *mapping)
+{
+       balance_dirty_pages_ratelimited_flags(mapping, 0);
 }
 EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
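(The original entry point is now a thin wrapper that always permits sleeping, so existing callers are unchanged: they call it once per newly dirtied page and rely on it to throttle them when the dirty limits are exceeded. A hypothetical sketch of such a loop, with an invented helper name for illustration:)

	/*
	 * Hypothetical dirtying loop using the blocking wrapper: mark each
	 * folio dirty, then let the ratelimited helper decide whether this
	 * task needs to be throttled.
	 */
	static void example_dirty_folios(struct address_space *mapping,
					 struct folio **folios, unsigned int nr)
	{
		unsigned int i;

		for (i = 0; i < nr; i++) {
			folio_mark_dirty(folios[i]);
			balance_dirty_pages_ratelimited(mapping);
		}
	}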