diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 7326b54..4e4ddd6 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -257,7 +257,7 @@ static void wb_min_max_ratio(struct bdi_writeback *wb,
  * requiring writeback.
  *
  * This number of dirtyable pages is the base value of which the
- * user-configurable dirty ratio is the effictive number of pages that
+ * user-configurable dirty ratio is the effective number of pages that
  * are allowed to be actually dirtied.  Per individual zone, or
  * globally by using the sum of dirtyable pages over all zones.
  *
@@ -387,8 +387,7 @@ static unsigned long global_dirtyable_memory(void)
  * Calculate @dtc->thresh and ->bg_thresh considering
  * vm_dirty_{bytes|ratio} and dirty_background_{bytes|ratio}.  The caller
  * must ensure that @dtc->avail is set before calling this function.  The
- * dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
- * real-time tasks.
+ * dirty limits will be lifted by 1/4 for real-time tasks.
  */
 static void domain_dirty_limits(struct dirty_throttle_control *dtc)
 {
@@ -436,7 +435,7 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
        if (bg_thresh >= thresh)
                bg_thresh = thresh / 2;
        tsk = current;
-       if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
+       if (rt_task(tsk)) {
                bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
                thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
        }
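
A quick worked example of the lift above, with illustrative numbers: for thresh = 1000 pages and global_wb_domain.dirty_limit = 3200 pages, a real-time task ends up with

	thresh = 1000 + 1000/4 + 3200/32 = 1350 pages

i.e. rt tasks get a quarter of the threshold plus 1/32 of the domain dirty limit as extra headroom before they are throttled.
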
@@ -486,7 +485,7 @@ static unsigned long node_dirty_limit(struct pglist_data *pgdat)
        else
                dirty = vm_dirty_ratio * node_memory / 100;
 
-       if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk))
+       if (rt_task(tsk))
                dirty += dirty / 4;
 
        return dirty;
@@ -505,15 +504,13 @@ bool node_dirty_ok(struct pglist_data *pgdat)
        unsigned long nr_pages = 0;
 
        nr_pages += node_page_state(pgdat, NR_FILE_DIRTY);
-       nr_pages += node_page_state(pgdat, NR_UNSTABLE_NFS);
        nr_pages += node_page_state(pgdat, NR_WRITEBACK);
 
        return nr_pages <= limit;
 }
 
 int dirty_background_ratio_handler(struct ctl_table *table, int write,
-               void __user *buffer, size_t *lenp,
-               loff_t *ppos)
+               void *buffer, size_t *lenp, loff_t *ppos)
 {
        int ret;
 
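
The NR_UNSTABLE_NFS term dropped from node_dirty_ok() above (and from the similar sums later in this patch) relies on the companion NFS change that accounts unstable pages as NR_WRITEBACK instead, so the totals being compared should come out the same; schematically:

	/* before: dirty load = NR_FILE_DIRTY + NR_UNSTABLE_NFS + NR_WRITEBACK
	 * after:  dirty load = NR_FILE_DIRTY + NR_WRITEBACK (unstable folded in)
	 */
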
@@ -524,8 +521,7 @@ int dirty_background_ratio_handler(struct ctl_table *table, int write,
 }
 
 int dirty_background_bytes_handler(struct ctl_table *table, int write,
-               void __user *buffer, size_t *lenp,
-               loff_t *ppos)
+               void *buffer, size_t *lenp, loff_t *ppos)
 {
        int ret;
 
@@ -535,9 +531,8 @@ int dirty_background_bytes_handler(struct ctl_table *table, int write,
        return ret;
 }
 
-int dirty_ratio_handler(struct ctl_table *table, int write,
-               void __user *buffer, size_t *lenp,
-               loff_t *ppos)
+int dirty_ratio_handler(struct ctl_table *table, int write, void *buffer,
+               size_t *lenp, loff_t *ppos)
 {
        int old_ratio = vm_dirty_ratio;
        int ret;
@@ -551,8 +546,7 @@ int dirty_ratio_handler(struct ctl_table *table, int write,
 }
 
 int dirty_bytes_handler(struct ctl_table *table, int write,
-               void __user *buffer, size_t *lenp,
-               loff_t *ppos)
+               void *buffer, size_t *lenp, loff_t *ppos)
 {
        unsigned long old_bytes = vm_dirty_bytes;
        int ret;
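
Dropping __user from all four handler signatures matches the sysctl core now copying the user buffer into kernel memory before calling ->proc_handler. A rough sketch of the caller side (the helper shown is an assumption; the real code lives in fs/proc/proc_sysctl.c):

	kbuf = memdup_user_nul(ubuf, count);	/* assumed helper; the core may allocate differently */
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);
	error = table->proc_handler(table, write, kbuf, &count, ppos);
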
@@ -759,7 +753,7 @@ static void mdtc_calc_avail(struct dirty_throttle_control *mdtc,
  * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
  *
  * Return: @wb's dirty limit in pages. The term "dirty" in the context of
- * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
+ * dirty balancing includes all PG_dirty and PG_writeback pages.
  */
 static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc)
 {
@@ -1567,7 +1561,7 @@ static void balance_dirty_pages(struct bdi_writeback *wb,
        struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
                                                     &mdtc_stor : NULL;
        struct dirty_throttle_control *sdtc;
-       unsigned long nr_reclaimable;   /* = file_dirty + unstable_nfs */
+       unsigned long nr_reclaimable;   /* = file_dirty */
        long period;
        long pause;
        long max_pause;
@@ -1587,14 +1581,7 @@ static void balance_dirty_pages(struct bdi_writeback *wb,
                unsigned long m_thresh = 0;
                unsigned long m_bg_thresh = 0;
 
-               /*
-                * Unstable writes are a feature of certain networked
-                * filesystems (i.e. NFS) in which data may have been
-                * written to the server's write cache, but has not yet
-                * been flushed to permanent storage.
-                */
-               nr_reclaimable = global_node_page_state(NR_FILE_DIRTY) +
-                                       global_node_page_state(NR_UNSTABLE_NFS);
+               nr_reclaimable = global_node_page_state(NR_FILE_DIRTY);
                gdtc->avail = global_dirtyable_memory();
                gdtc->dirty = nr_reclaimable + global_node_page_state(NR_WRITEBACK);
 
@@ -1653,8 +1640,12 @@ static void balance_dirty_pages(struct bdi_writeback *wb,
                if (dirty <= dirty_freerun_ceiling(thresh, bg_thresh) &&
                    (!mdtc ||
                     m_dirty <= dirty_freerun_ceiling(m_thresh, m_bg_thresh))) {
-                       unsigned long intv = dirty_poll_interval(dirty, thresh);
-                       unsigned long m_intv = ULONG_MAX;
+                       unsigned long intv;
+                       unsigned long m_intv;
+
+free_running:
+                       intv = dirty_poll_interval(dirty, thresh);
+                       m_intv = ULONG_MAX;
 
                        current->dirty_paused_when = now;
                        current->nr_dirtied = 0;
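
For reading the new free_running path: dirty_freerun_ceiling(), defined earlier in this file, is just the midpoint of the two thresholds:

	return (thresh + bg_thresh) / 2;

so with thresh = 1200 and bg_thresh = 600, a writer free-runs (resets its pause state and polls again after intv pages) as long as dirty stays at or below 900 pages.
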
@@ -1673,9 +1664,20 @@ static void balance_dirty_pages(struct bdi_writeback *wb,
                 * Calculate global domain's pos_ratio and select the
                 * global dtc by default.
                 */
-               if (!strictlimit)
+               if (!strictlimit) {
                        wb_dirty_limits(gdtc);
 
+                       if ((current->flags & PF_LOCAL_THROTTLE) &&
+                           gdtc->wb_dirty <
+                           dirty_freerun_ceiling(gdtc->wb_thresh,
+                                                 gdtc->wb_bg_thresh))
+                               /*
+                                * LOCAL_THROTTLE tasks must not be throttled
+                                * when below the per-wb freerun ceiling.
+                                */
+                               goto free_running;
+               }
+
                dirty_exceeded = (gdtc->wb_dirty > gdtc->wb_thresh) &&
                        ((gdtc->dirty > gdtc->thresh) || strictlimit);
 
@@ -1689,9 +1691,20 @@ static void balance_dirty_pages(struct bdi_writeback *wb,
                         * both global and memcg domains.  Choose the one
                         * w/ lower pos_ratio.
                         */
-                       if (!strictlimit)
+                       if (!strictlimit) {
                                wb_dirty_limits(mdtc);
 
+                               if ((current->flags & PF_LOCAL_THROTTLE) &&
+                                   mdtc->wb_dirty <
+                                   dirty_freerun_ceiling(mdtc->wb_thresh,
+                                                         mdtc->wb_bg_thresh))
+                                       /*
+                                        * LOCAL_THROTTLE tasks must not be
+                                        * throttled when below the per-wb
+                                        * freerun ceiling.
+                                        */
+                                       goto free_running;
+                       }
                        dirty_exceeded |= (mdtc->wb_dirty > mdtc->wb_thresh) &&
                                ((mdtc->dirty > mdtc->thresh) || strictlimit);
 
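A minimal sketch of how a task opts into this per-wb escape (in the kernel the flag is set by callers such as nfsd and the loop driver, per the PF_LESS_THROTTLE -> PF_LOCAL_THROTTLE conversion these hunks belong to):

	current->flags |= PF_LOCAL_THROTTLE;	/* throttle against this wb's limits only */
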
@@ -1938,8 +1951,7 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
         * as we're trying to decide whether to put more under writeback.
         */
        gdtc->avail = global_dirtyable_memory();
-       gdtc->dirty = global_node_page_state(NR_FILE_DIRTY) +
-                     global_node_page_state(NR_UNSTABLE_NFS);
+       gdtc->dirty = global_node_page_state(NR_FILE_DIRTY);
        domain_dirty_limits(gdtc);
 
        if (gdtc->dirty > gdtc->bg_thresh)
@@ -1972,7 +1984,7 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
  * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
  */
 int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
-       void __user *buffer, size_t *length, loff_t *ppos)
+               void *buffer, size_t *length, loff_t *ppos)
 {
        unsigned int old_interval = dirty_writeback_interval;
        int ret;
@@ -2064,13 +2076,11 @@ static int page_writeback_cpu_online(unsigned int cpu)
  * Called early on to tune the page writeback dirty limits.
  *
  * We used to scale dirty pages according to how total memory
- * related to pages that could be allocated for buffers (by
- * comparing nr_free_buffer_pages() to vm_total_pages.
+ * related to pages that could be allocated for buffers.
  *
  * However, that was when we used "dirty_ratio" to scale with
  * all memory, and we don't do that any more. "dirty_ratio"
- * is now applied to total non-HIGHPAGE memory (by subtracting
- * totalhigh_pages from vm_total_pages), and as such we can't
+ * is now applied to total non-HIGHPAGE memory, and as such we can't
  * get into the old insane situation any more where we had
  * large amounts of dirty pages compared to a small amount of
  * non-HIGHMEM memory.
@@ -2164,7 +2174,6 @@ int write_cache_pages(struct address_space *mapping,
        int error;
        struct pagevec pvec;
        int nr_pages;
-       pgoff_t uninitialized_var(writeback_index);
        pgoff_t index;
        pgoff_t end;            /* Inclusive */
        pgoff_t done_index;
@@ -2173,8 +2182,7 @@ int write_cache_pages(struct address_space *mapping,
 
        pagevec_init(&pvec);
        if (wbc->range_cyclic) {
-               writeback_index = mapping->writeback_index; /* prev offset */
-               index = writeback_index;
+               index = mapping->writeback_index; /* prev offset */
                end = -1;
        } else {
                index = wbc->range_start >> PAGE_SHIFT;