diff --git a/mm/vmstat.c b/mm/vmstat.c
index f894216..74b2c37 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -342,6 +342,12 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
        long t;
 
        if (vmstat_item_in_bytes(item)) {
+               /*
+                * Only cgroups use subpage accounting right now; at
+                * the global level, these items still change in
+                * multiples of whole pages. Store them as pages
+                * internally to keep the per-cpu counters compact.
+                */
                VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
                delta >>= PAGE_SHIFT;
        }
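
The conversion above hinges on vmstat_item_in_bytes() from include/linux/vmstat.h. In trees of this vintage it boils down to a check for the byte-precise slab counters; a from-memory sketch, not verbatim source:

    static __always_inline bool vmstat_item_in_bytes(int idx)
    {
            /*
             * Per-memcg slab accounting tracks individual objects and is
             * therefore byte-precise; all other items move in whole pages.
             */
            return (idx == NR_SLAB_RECLAIMABLE_B ||
                    idx == NR_SLAB_UNRECLAIMABLE_B);
    }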
@@ -551,6 +557,12 @@ static inline void mod_node_state(struct pglist_data *pgdat,
        long o, n, t, z;
 
        if (vmstat_item_in_bytes(item)) {
+               /*
+                * Only cgroups use subpage accounting right now; at
+                * the global level, these items still change in
+                * multiples of whole pages. Store them as pages
+                * internally to keep the per-cpu counters compact.
+                */
                VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
                delta >>= PAGE_SHIFT;
        }
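
The hunk only shows the local declarations. For context, the body of mod_node_state() (the this_cpu_cmpxchg()-based path used under CONFIG_HAVE_CMPXCHG_LOCAL) is roughly the following, which is where o (old), n (new), t (threshold) and z (overflow) get used; again a from-memory sketch:

    struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
    s8 __percpu *p = pcp->vm_node_stat_diff + item;

    do {
            z = 0;  /* overflow to the node-wide counter */
            t = this_cpu_read(pcp->stat_threshold);
            o = this_cpu_read(*p);
            n = delta + o;

            if (abs(n) > t) {
                    int os = overstep_mode * (t >> 1);

                    /* Overflow must be pushed to the node counter. */
                    z = n + os;
                    n = -os;
            }
    } while (this_cpu_cmpxchg(*p, o, n) != o);

    if (z)
            node_page_state_add(z, pgdat, item);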
@@ -1215,6 +1227,9 @@ const char * const vmstat_text[] = {
        "nr_shadow_call_stack",
 #endif
        "nr_page_table_pages",
+#ifdef CONFIG_SWAP
+       "nr_swapcached",
+#endif
 
        /* enum writeback_stat_item counters */
        "nr_dirty_threshold",
@@ -1619,8 +1634,12 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
        if (is_zone_first_populated(pgdat, zone)) {
                seq_printf(m, "\n  per-node stats");
                for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
+                       unsigned long pages = node_page_state_pages(pgdat, i);
+
+                       if (vmstat_item_print_in_thp(i))
+                               pages /= HPAGE_PMD_NR;
                        seq_printf(m, "\n      %-12s %lu", node_stat_name(i),
-                                  node_page_state_pages(pgdat, i));
+                                  pages);
                }
        }
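
vmstat_item_print_in_thp() picks out the THP-related node counters that were switched to base-page accounting internally but are still reported in huge-page units; per include/linux/vmstat.h it is approximately:

    static __always_inline bool vmstat_item_print_in_thp(enum node_stat_item item)
    {
            if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
                    return false;

            return item == NR_ANON_THPS ||
                   item == NR_FILE_THPS ||
                   item == NR_SHMEM_THPS ||
                   item == NR_SHMEM_PMDMAPPED ||
                   item == NR_FILE_PMDMAPPED;
    }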
        seq_printf(m,
@@ -1630,14 +1649,16 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
                   "\n        high     %lu"
                   "\n        spanned  %lu"
                   "\n        present  %lu"
-                  "\n        managed  %lu",
+                  "\n        managed  %lu"
+                  "\n        cma      %lu",
                   zone_page_state(zone, NR_FREE_PAGES),
                   min_wmark_pages(zone),
                   low_wmark_pages(zone),
                   high_wmark_pages(zone),
                   zone->spanned_pages,
                   zone->present_pages,
-                  zone_managed_pages(zone));
+                  zone_managed_pages(zone),
+                  zone_cma_pages(zone));
 
        seq_printf(m,
                   "\n        protection: (%ld",
@@ -1740,8 +1761,11 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
        v += NR_VM_NUMA_STAT_ITEMS;
 #endif
 
-       for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
+       for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
                v[i] = global_node_page_state_pages(i);
+               if (vmstat_item_print_in_thp(i))
+                       v[i] /= HPAGE_PMD_NR;
+       }
        v += NR_VM_NODE_STAT_ITEMS;
 
        global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
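
The same huge-page scaling now applies to /proc/vmstat. HPAGE_PMD_NR is the number of base pages per PMD-sized huge page, defined in include/linux/huge_mm.h along the lines of:

    #define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT - PAGE_SHIFT)
    #define HPAGE_PMD_NR    (1 << HPAGE_PMD_ORDER)

With 4K base pages and 2M huge pages that is 512, so a raw count of 1024 base pages is reported as 2.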
@@ -1882,16 +1906,12 @@ static void vmstat_update(struct work_struct *w)
  */
 static bool need_update(int cpu)
 {
+       pg_data_t *last_pgdat = NULL;
        struct zone *zone;
 
        for_each_populated_zone(zone) {
                struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);
-
-               BUILD_BUG_ON(sizeof(p->vm_stat_diff[0]) != 1);
-#ifdef CONFIG_NUMA
-               BUILD_BUG_ON(sizeof(p->vm_numa_stat_diff[0]) != 2);
-#endif
-
+               struct per_cpu_nodestat *n;
                /*
                 * The fast way of checking if there are any vmstat diffs.
                 */
@@ -1903,6 +1923,13 @@ static bool need_update(int cpu)
                               sizeof(p->vm_numa_stat_diff[0])))
                        return true;
 #endif
+               if (last_pgdat == zone->zone_pgdat)
+                       continue;
+               last_pgdat = zone->zone_pgdat;
+               n = per_cpu_ptr(zone->zone_pgdat->per_cpu_nodestats, cpu);
+               if (memchr_inv(n->vm_node_stat_diff, 0, NR_VM_NODE_STAT_ITEMS *
+                              sizeof(n->vm_node_stat_diff[0])))
+                       return true;
        }
        return false;
 }
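
memchr_inv(ptr, c, size) returns a pointer to the first byte of ptr that differs from c, or NULL when every byte matches, which makes it a cheap "is this per-cpu diff array all zeroes?" probe. Conceptually (the real lib/string.c version is word-at-a-time optimized):

    static void *memchr_inv_sketch(const void *start, int c, size_t bytes)
    {
            const unsigned char *p = start;

            while (bytes--) {
                    if (*p != (unsigned char)c)
                            return (void *)p;
                    p++;
            }
            return NULL;
    }

The last_pgdat bookkeeping exists because several zones share one pgdat: the node-level diff array only needs to be scanned once per node, not once per zone.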
@@ -1953,6 +1980,8 @@ static void vmstat_shepherd(struct work_struct *w)
 
                if (!delayed_work_pending(dw) && need_update(cpu))
                        queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
+
+               cond_resched();
        }
        put_online_cpus();
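
The cond_resched() is presumably there because vmstat_shepherd walks every online CPU; on machines with hundreds of CPUs that loop can run long enough to add scheduling latency, and yielding between iterations keeps the shepherd preemption-friendly.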