1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/vmstat.c
4  *
5  *  Manages VM statistics
6  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
7  *
8  *  zoned VM statistics
9  *  Copyright (C) 2006 Silicon Graphics, Inc.,
10  *              Christoph Lameter <christoph@lameter.com>
11  *  Copyright (C) 2008-2014 Christoph Lameter
12  */
13 #include <linux/fs.h>
14 #include <linux/mm.h>
15 #include <linux/err.h>
16 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/cpu.h>
19 #include <linux/cpumask.h>
20 #include <linux/vmstat.h>
21 #include <linux/proc_fs.h>
22 #include <linux/seq_file.h>
23 #include <linux/debugfs.h>
24 #include <linux/sched.h>
25 #include <linux/math64.h>
26 #include <linux/writeback.h>
27 #include <linux/compaction.h>
28 #include <linux/mm_inline.h>
29 #include <linux/page_ext.h>
30 #include <linux/page_owner.h>
31
32 #include "internal.h"
33
34 #define NUMA_STATS_THRESHOLD (U16_MAX - 2)
35
36 #ifdef CONFIG_NUMA
37 int sysctl_vm_numa_stat = ENABLE_NUMA_STAT;
38
39 /* zero numa counters within a zone */
40 static void zero_zone_numa_counters(struct zone *zone)
41 {
42         int item, cpu;
43
44         for (item = 0; item < NR_VM_NUMA_STAT_ITEMS; item++) {
45                 atomic_long_set(&zone->vm_numa_stat[item], 0);
46                 for_each_online_cpu(cpu)
47                         per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item]
48                                                 = 0;
49         }
50 }
51
52 /* zero numa counters of all the populated zones */
53 static void zero_zones_numa_counters(void)
54 {
55         struct zone *zone;
56
57         for_each_populated_zone(zone)
58                 zero_zone_numa_counters(zone);
59 }
60
61 /* zero global numa counters */
62 static void zero_global_numa_counters(void)
63 {
64         int item;
65
66         for (item = 0; item < NR_VM_NUMA_STAT_ITEMS; item++)
67                 atomic_long_set(&vm_numa_stat[item], 0);
68 }
69
70 static void invalid_numa_statistics(void)
71 {
72         zero_zones_numa_counters();
73         zero_global_numa_counters();
74 }
75
76 static DEFINE_MUTEX(vm_numa_stat_lock);
77
78 int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
79                 void *buffer, size_t *length, loff_t *ppos)
80 {
81         int ret, oldval;
82
83         mutex_lock(&vm_numa_stat_lock);
84         if (write)
85                 oldval = sysctl_vm_numa_stat;
86         ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
87         if (ret || !write)
88                 goto out;
89
90         if (oldval == sysctl_vm_numa_stat)
91                 goto out;
92         else if (sysctl_vm_numa_stat == ENABLE_NUMA_STAT) {
93                 static_branch_enable(&vm_numa_stat_key);
94                 pr_info("enable numa statistics\n");
95         } else {
96                 static_branch_disable(&vm_numa_stat_key);
97                 invalid_numa_statistics();
98                 pr_info("disable numa statistics, and clear numa counters\n");
99         }
100
101 out:
102         mutex_unlock(&vm_numa_stat_lock);
103         return ret;
104 }
105 #endif
106
107 #ifdef CONFIG_VM_EVENT_COUNTERS
108 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
109 EXPORT_PER_CPU_SYMBOL(vm_event_states);
110
111 static void sum_vm_events(unsigned long *ret)
112 {
113         int cpu;
114         int i;
115
116         memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
117
118         for_each_online_cpu(cpu) {
119                 struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
120
121                 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
122                         ret[i] += this->event[i];
123         }
124 }
125
126 /*
127  * Accumulate the vm event counters across all CPUs.
128  * The result is unavoidably approximate - it can change
129  * during and after execution of this function.
130  */
131 void all_vm_events(unsigned long *ret)
132 {
133         get_online_cpus();
134         sum_vm_events(ret);
135         put_online_cpus();
136 }
137 EXPORT_SYMBOL_GPL(all_vm_events);
138
139 /*
140  * Fold the foreign cpu events into our own.
141  *
142  * This is adding to the events on one processor
143  * but keeps the global counts constant.
144  */
145 void vm_events_fold_cpu(int cpu)
146 {
147         struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
148         int i;
149
150         for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
151                 count_vm_events(i, fold_state->event[i]);
152                 fold_state->event[i] = 0;
153         }
154 }
155
156 #endif /* CONFIG_VM_EVENT_COUNTERS */
157
158 /*
159  * Manage combined zone based / global counters
160  *
161  * vm_stat contains the global counters
162  */
163 atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
164 atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS] __cacheline_aligned_in_smp;
165 atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
166 EXPORT_SYMBOL(vm_zone_stat);
167 EXPORT_SYMBOL(vm_numa_stat);
168 EXPORT_SYMBOL(vm_node_stat);
169
170 #ifdef CONFIG_SMP
171
172 int calculate_pressure_threshold(struct zone *zone)
173 {
174         int threshold;
175         int watermark_distance;
176
177         /*
178          * As vmstats are not up to date, there is drift between the estimated
179          * and real values. For high thresholds and a high number of CPUs, it
180          * is possible for the min watermark to be breached while the estimated
181          * value looks fine. The pressure threshold is a reduced value such
182          * that even the maximum amount of drift will not accidentally breach
183          * the min watermark
184          */
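	/*
	 * Worked example with hypothetical numbers: an 800-page gap between
	 * the low and min watermarks on an 8-CPU system gives a per-cpu
	 * threshold of 800 / 8 = 100; larger results are clamped to 125
	 * below.
	 */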
185         watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
186         threshold = max(1, (int)(watermark_distance / num_online_cpus()));
187
188         /*
189          * Maximum threshold is 125
190          */
191         threshold = min(125, threshold);
192
193         return threshold;
194 }
195
196 int calculate_normal_threshold(struct zone *zone)
197 {
198         int threshold;
199         int mem;        /* memory in 128 MB units */
200
201         /*
202          * The threshold scales with the number of processors and the amount
203          * of memory per zone. More memory means that we can defer updates for
204          * longer, more processors could lead to more contention.
205          * fls() is used to have a cheap way of logarithmic scaling.
206          *
207          * Some sample thresholds:
208          *
209          * Threshold    Processors      (fls)   Zonesize        fls(mem+1)
210          * ------------------------------------------------------------------
211          * 8            1               1       0.9-1 GB        4
212          * 16           2               2       0.9-1 GB        4
213          * 20           2               2       1-2 GB          5
214          * 24           2               2       2-4 GB          6
215          * 28           2               2       4-8 GB          7
216          * 32           2               2       8-16 GB         8
217          * 4            2               2       <128M           1
218          * 30           4               3       2-4 GB          5
219          * 48           4               3       8-16 GB         8
220          * 32           8               4       1-2 GB          4
221          * 32           8               4       0.9-1GB         4
222          * 10           16              5       <128M           1
223          * 40           16              5       900M            4
224          * 70           64              7       2-4 GB          5
225          * 84           64              7       4-8 GB          6
226          * 108          512             9       4-8 GB          6
227          * 125          1024            10      8-16 GB         8
228          * 125          1024            10      16-32 GB        9
229          */
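	/*
	 * Worked example with hypothetical numbers: 2 online CPUs and a
	 * 1.5 GB zone give mem = 12 (in 128 MB units), so
	 * threshold = 2 * fls(2) * (1 + fls(12)) = 2 * 2 * 5 = 20,
	 * matching the 1-2 GB / two-processor row above.
	 */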
230
231         mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT);
232
233         threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
234
235         /*
236          * Maximum threshold is 125
237          */
238         threshold = min(125, threshold);
239
240         return threshold;
241 }
242
243 /*
244  * Refresh the thresholds for each zone.
245  */
246 void refresh_zone_stat_thresholds(void)
247 {
248         struct pglist_data *pgdat;
249         struct zone *zone;
250         int cpu;
251         int threshold;
252
253         /* Zero current pgdat thresholds */
254         for_each_online_pgdat(pgdat) {
255                 for_each_online_cpu(cpu) {
256                         per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
257                 }
258         }
259
260         for_each_populated_zone(zone) {
261                 struct pglist_data *pgdat = zone->zone_pgdat;
262                 unsigned long max_drift, tolerate_drift;
263
264                 threshold = calculate_normal_threshold(zone);
265
266                 for_each_online_cpu(cpu) {
267                         int pgdat_threshold;
268
269                         per_cpu_ptr(zone->pageset, cpu)->stat_threshold
270                                                         = threshold;
271
272                         /* Base nodestat threshold on the largest populated zone. */
273                         pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
274                         per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
275                                 = max(threshold, pgdat_threshold);
276                 }
277
278                 /*
279                  * Only set percpu_drift_mark if there is a danger that
280                  * NR_FREE_PAGES reports the low watermark is ok when in fact
281                  * the min watermark could be breached by an allocation
282                  */
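		/*
		 * Worked example with hypothetical numbers: 16 CPUs with a
		 * threshold of 125 let the per-cpu diffs hide up to
		 * 16 * 125 = 2000 free pages. If the low-min gap is smaller
		 * than that, the zone is flagged so that watermark checks can
		 * fall back to a precise snapshot of NR_FREE_PAGES.
		 */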
283                 tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
284                 max_drift = num_online_cpus() * threshold;
285                 if (max_drift > tolerate_drift)
286                         zone->percpu_drift_mark = high_wmark_pages(zone) +
287                                         max_drift;
288         }
289 }
290
291 void set_pgdat_percpu_threshold(pg_data_t *pgdat,
292                                 int (*calculate_pressure)(struct zone *))
293 {
294         struct zone *zone;
295         int cpu;
296         int threshold;
297         int i;
298
299         for (i = 0; i < pgdat->nr_zones; i++) {
300                 zone = &pgdat->node_zones[i];
301                 if (!zone->percpu_drift_mark)
302                         continue;
303
304                 threshold = (*calculate_pressure)(zone);
305                 for_each_online_cpu(cpu)
306                         per_cpu_ptr(zone->pageset, cpu)->stat_threshold
307                                                         = threshold;
308         }
309 }
310
311 /*
312  * For use when we know that interrupts are disabled,
313  * or when we know that preemption is disabled and that
314  * particular counter cannot be updated from interrupt context.
315  */
316 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
317                            long delta)
318 {
319         struct per_cpu_pageset __percpu *pcp = zone->pageset;
320         s8 __percpu *p = pcp->vm_stat_diff + item;
321         long x;
322         long t;
323
324         x = delta + __this_cpu_read(*p);
325
326         t = __this_cpu_read(pcp->stat_threshold);
327
328         if (unlikely(x > t || x < -t)) {
329                 zone_page_state_add(x, zone, item);
330                 x = 0;
331         }
332         __this_cpu_write(*p, x);
333 }
334 EXPORT_SYMBOL(__mod_zone_page_state);
335
336 void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
337                                 long delta)
338 {
339         struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
340         s8 __percpu *p = pcp->vm_node_stat_diff + item;
341         long x;
342         long t;
343
344         x = delta + __this_cpu_read(*p);
345
346         t = __this_cpu_read(pcp->stat_threshold);
347
348         if (unlikely(x > t || x < -t)) {
349                 node_page_state_add(x, pgdat, item);
350                 x = 0;
351         }
352         __this_cpu_write(*p, x);
353 }
354 EXPORT_SYMBOL(__mod_node_page_state);
355
356 /*
357  * Optimized increment and decrement functions.
358  *
359  * These are only for a single page and therefore can take a struct page *
360  * argument instead of struct zone *. This allows the inclusion of the code
361  * generated for page_zone(page) into the optimized functions.
362  *
363  * No overflow check is necessary and therefore the differential can be
364  * incremented or decremented in place which may allow the compilers to
365  * generate better code.
366  * The increment or decrement is known and therefore one boundary check can
367  * be omitted.
368  *
369  * NOTE: These functions are very performance sensitive. Change only
370  * with care.
371  *
372  * Some processors have inc/dec instructions that are atomic vs an interrupt.
373  * However, the code must first determine the differential location in a zone
374  * based on the processor number and then inc/dec the counter. There is no
375  * guarantee without disabling preemption that the processor will not change
376  * in between and therefore the atomicity vs. interrupt cannot be exploited
377  * in a useful way here.
378  */
379 void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
380 {
381         struct per_cpu_pageset __percpu *pcp = zone->pageset;
382         s8 __percpu *p = pcp->vm_stat_diff + item;
383         s8 v, t;
384
385         v = __this_cpu_inc_return(*p);
386         t = __this_cpu_read(pcp->stat_threshold);
387         if (unlikely(v > t)) {
388                 s8 overstep = t >> 1;
389
390                 zone_page_state_add(v + overstep, zone, item);
391                 __this_cpu_write(*p, -overstep);
392         }
393 }
394
395 void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
396 {
397         struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
398         s8 __percpu *p = pcp->vm_node_stat_diff + item;
399         s8 v, t;
400
401         v = __this_cpu_inc_return(*p);
402         t = __this_cpu_read(pcp->stat_threshold);
403         if (unlikely(v > t)) {
404                 s8 overstep = t >> 1;
405
406                 node_page_state_add(v + overstep, pgdat, item);
407                 __this_cpu_write(*p, -overstep);
408         }
409 }
410
411 void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
412 {
413         __inc_zone_state(page_zone(page), item);
414 }
415 EXPORT_SYMBOL(__inc_zone_page_state);
416
417 void __inc_node_page_state(struct page *page, enum node_stat_item item)
418 {
419         __inc_node_state(page_pgdat(page), item);
420 }
421 EXPORT_SYMBOL(__inc_node_page_state);
422
423 void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
424 {
425         struct per_cpu_pageset __percpu *pcp = zone->pageset;
426         s8 __percpu *p = pcp->vm_stat_diff + item;
427         s8 v, t;
428
429         v = __this_cpu_dec_return(*p);
430         t = __this_cpu_read(pcp->stat_threshold);
431         if (unlikely(v < -t)) {
432                 s8 overstep = t >> 1;
433
434                 zone_page_state_add(v - overstep, zone, item);
435                 __this_cpu_write(*p, overstep);
436         }
437 }
438
439 void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
440 {
441         struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
442         s8 __percpu *p = pcp->vm_node_stat_diff + item;
443         s8 v, t;
444
445         v = __this_cpu_dec_return(*p);
446         t = __this_cpu_read(pcp->stat_threshold);
447         if (unlikely(v < -t)) {
448                 s8 overstep = t >> 1;
449
450                 node_page_state_add(v - overstep, pgdat, item);
451                 __this_cpu_write(*p, overstep);
452         }
453 }
454
455 void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
456 {
457         __dec_zone_state(page_zone(page), item);
458 }
459 EXPORT_SYMBOL(__dec_zone_page_state);
460
461 void __dec_node_page_state(struct page *page, enum node_stat_item item)
462 {
463         __dec_node_state(page_pgdat(page), item);
464 }
465 EXPORT_SYMBOL(__dec_node_page_state);
466
467 #ifdef CONFIG_HAVE_CMPXCHG_LOCAL
468 /*
469  * If we have cmpxchg_local support then we do not need to incur the overhead
470  * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
471  *
472  * mod_zone_state() modifies the zone counter state through atomic per cpu
473  * operations.
474  *
475  * Overstep mode specifies how overstep should be handled:
476  *     0       No overstepping
477  *     1       Overstepping half of threshold
478  *     -1      Overstepping minus half of threshold
479  */
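/*
 * Worked example with hypothetical numbers: with stat_threshold t = 32 and
 * overstep mode 1, a delta that pushes the per-cpu diff to 33 folds
 * 33 + 16 = 49 pages into the zone counter and leaves the diff at -16,
 * giving roughly half a threshold of headroom before the next fold.
 */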
480 static inline void mod_zone_state(struct zone *zone,
481        enum zone_stat_item item, long delta, int overstep_mode)
482 {
483         struct per_cpu_pageset __percpu *pcp = zone->pageset;
484         s8 __percpu *p = pcp->vm_stat_diff + item;
485         long o, n, t, z;
486
487         do {
488                 z = 0;  /* overflow to zone counters */
489
490                 /*
491                  * The fetching of the stat_threshold is racy. We may apply
492                  * a counter threshold to the wrong cpu if we get
493                  * rescheduled while executing here. However, the next
494                  * counter update will apply the threshold again and
495                  * therefore bring the counter under the threshold again.
496                  *
497                  * Most of the time the thresholds are the same anyway
498                  * for all cpus in a zone.
499                  */
500                 t = this_cpu_read(pcp->stat_threshold);
501
502                 o = this_cpu_read(*p);
503                 n = delta + o;
504
505                 if (n > t || n < -t) {
506                         int os = overstep_mode * (t >> 1);
507
508                         /* Overflow must be added to zone counters */
509                         z = n + os;
510                         n = -os;
511                 }
512         } while (this_cpu_cmpxchg(*p, o, n) != o);
513
514         if (z)
515                 zone_page_state_add(z, zone, item);
516 }
517
518 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
519                          long delta)
520 {
521         mod_zone_state(zone, item, delta, 0);
522 }
523 EXPORT_SYMBOL(mod_zone_page_state);
524
525 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
526 {
527         mod_zone_state(page_zone(page), item, 1, 1);
528 }
529 EXPORT_SYMBOL(inc_zone_page_state);
530
531 void dec_zone_page_state(struct page *page, enum zone_stat_item item)
532 {
533         mod_zone_state(page_zone(page), item, -1, -1);
534 }
535 EXPORT_SYMBOL(dec_zone_page_state);
536
537 static inline void mod_node_state(struct pglist_data *pgdat,
538        enum node_stat_item item, int delta, int overstep_mode)
539 {
540         struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
541         s8 __percpu *p = pcp->vm_node_stat_diff + item;
542         long o, n, t, z;
543
544         do {
545                 z = 0;  /* overflow to node counters */
546
547                 /*
548                  * The fetching of the stat_threshold is racy. We may apply
549                  * a counter threshold to the wrong cpu if we get
550                  * rescheduled while executing here. However, the next
551                  * counter update will apply the threshold again and
552                  * therefore bring the counter under the threshold again.
553                  *
554                  * Most of the time the thresholds are the same anyway
555                  * for all cpus in a node.
556                  */
557                 t = this_cpu_read(pcp->stat_threshold);
558
559                 o = this_cpu_read(*p);
560                 n = delta + o;
561
562                 if (n > t || n < -t) {
563                         int os = overstep_mode * (t >> 1);
564
565                         /* Overflow must be added to node counters */
566                         z = n + os;
567                         n = -os;
568                 }
569         } while (this_cpu_cmpxchg(*p, o, n) != o);
570
571         if (z)
572                 node_page_state_add(z, pgdat, item);
573 }
574
575 void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
576                                         long delta)
577 {
578         mod_node_state(pgdat, item, delta, 0);
579 }
580 EXPORT_SYMBOL(mod_node_page_state);
581
582 void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
583 {
584         mod_node_state(pgdat, item, 1, 1);
585 }
586
587 void inc_node_page_state(struct page *page, enum node_stat_item item)
588 {
589         mod_node_state(page_pgdat(page), item, 1, 1);
590 }
591 EXPORT_SYMBOL(inc_node_page_state);
592
593 void dec_node_page_state(struct page *page, enum node_stat_item item)
594 {
595         mod_node_state(page_pgdat(page), item, -1, -1);
596 }
597 EXPORT_SYMBOL(dec_node_page_state);
598 #else
599 /*
600  * Use interrupt disable to serialize counter updates
601  */
602 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
603                          long delta)
604 {
605         unsigned long flags;
606
607         local_irq_save(flags);
608         __mod_zone_page_state(zone, item, delta);
609         local_irq_restore(flags);
610 }
611 EXPORT_SYMBOL(mod_zone_page_state);
612
613 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
614 {
615         unsigned long flags;
616         struct zone *zone;
617
618         zone = page_zone(page);
619         local_irq_save(flags);
620         __inc_zone_state(zone, item);
621         local_irq_restore(flags);
622 }
623 EXPORT_SYMBOL(inc_zone_page_state);
624
625 void dec_zone_page_state(struct page *page, enum zone_stat_item item)
626 {
627         unsigned long flags;
628
629         local_irq_save(flags);
630         __dec_zone_page_state(page, item);
631         local_irq_restore(flags);
632 }
633 EXPORT_SYMBOL(dec_zone_page_state);
634
635 void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
636 {
637         unsigned long flags;
638
639         local_irq_save(flags);
640         __inc_node_state(pgdat, item);
641         local_irq_restore(flags);
642 }
643 EXPORT_SYMBOL(inc_node_state);
644
645 void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
646                                         long delta)
647 {
648         unsigned long flags;
649
650         local_irq_save(flags);
651         __mod_node_page_state(pgdat, item, delta);
652         local_irq_restore(flags);
653 }
654 EXPORT_SYMBOL(mod_node_page_state);
655
656 void inc_node_page_state(struct page *page, enum node_stat_item item)
657 {
658         unsigned long flags;
659         struct pglist_data *pgdat;
660
661         pgdat = page_pgdat(page);
662         local_irq_save(flags);
663         __inc_node_state(pgdat, item);
664         local_irq_restore(flags);
665 }
666 EXPORT_SYMBOL(inc_node_page_state);
667
668 void dec_node_page_state(struct page *page, enum node_stat_item item)
669 {
670         unsigned long flags;
671
672         local_irq_save(flags);
673         __dec_node_page_state(page, item);
674         local_irq_restore(flags);
675 }
676 EXPORT_SYMBOL(dec_node_page_state);
677 #endif
678
679 /*
680  * Fold a differential into the global counters.
681  * Returns the number of counters updated.
682  */
683 #ifdef CONFIG_NUMA
684 static int fold_diff(int *zone_diff, int *numa_diff, int *node_diff)
685 {
686         int i;
687         int changes = 0;
688
689         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
690                 if (zone_diff[i]) {
691                         atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
692                         changes++;
693         }
694
695         for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
696                 if (numa_diff[i]) {
697                         atomic_long_add(numa_diff[i], &vm_numa_stat[i]);
698                         changes++;
699         }
700
701         for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
702                 if (node_diff[i]) {
703                         atomic_long_add(node_diff[i], &vm_node_stat[i]);
704                         changes++;
705         }
706         return changes;
707 }
708 #else
709 static int fold_diff(int *zone_diff, int *node_diff)
710 {
711         int i;
712         int changes = 0;
713
714         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
715                 if (zone_diff[i]) {
716                         atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
717                         changes++;
718         }
719
720         for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
721                 if (node_diff[i]) {
722                         atomic_long_add(node_diff[i], &vm_node_stat[i]);
723                         changes++;
724         }
725         return changes;
726 }
727 #endif /* CONFIG_NUMA */
728
729 /*
730  * Update the zone counters for the current cpu.
731  *
732  * Note that refresh_cpu_vm_stats strives to only access
733  * node local memory. The per cpu pagesets on remote zones are placed
734  * in the memory local to the processor using that pageset. So the
735  * loop over all zones will access a series of cachelines local to
736  * the processor.
737  *
738  * The call to zone_page_state_add updates the cachelines with the
739  * statistics in the remote zone struct as well as the global cachelines
740  * with the global counters. These could cause remote node cache line
741  * bouncing and will have to be only done when necessary.
742  * bouncing and so should only be done when necessary.
743  * The function returns the number of global counters updated.
744  */
745 static int refresh_cpu_vm_stats(bool do_pagesets)
746 {
747         struct pglist_data *pgdat;
748         struct zone *zone;
749         int i;
750         int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
751 #ifdef CONFIG_NUMA
752         int global_numa_diff[NR_VM_NUMA_STAT_ITEMS] = { 0, };
753 #endif
754         int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
755         int changes = 0;
756
757         for_each_populated_zone(zone) {
758                 struct per_cpu_pageset __percpu *p = zone->pageset;
759
760                 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
761                         int v;
762
763                         v = this_cpu_xchg(p->vm_stat_diff[i], 0);
764                         if (v) {
765
766                                 atomic_long_add(v, &zone->vm_stat[i]);
767                                 global_zone_diff[i] += v;
768 #ifdef CONFIG_NUMA
769                                 /* 3 seconds idle till flush */
770                                 __this_cpu_write(p->expire, 3);
771 #endif
772                         }
773                 }
774 #ifdef CONFIG_NUMA
775                 for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++) {
776                         int v;
777
778                         v = this_cpu_xchg(p->vm_numa_stat_diff[i], 0);
779                         if (v) {
780
781                                 atomic_long_add(v, &zone->vm_numa_stat[i]);
782                                 global_numa_diff[i] += v;
783                                 __this_cpu_write(p->expire, 3);
784                         }
785                 }
786
787                 if (do_pagesets) {
788                         cond_resched();
789                         /*
790                          * Deal with draining the remote pageset of this
791                          * processor
792                          *
793                          * Check if there are pages remaining in this pageset;
794                          * if not, there is nothing to expire.
795                          */
796                         if (!__this_cpu_read(p->expire) ||
797                                !__this_cpu_read(p->pcp.count))
798                                 continue;
799
800                         /*
801                          * We never drain zones local to this processor.
802                          */
803                         if (zone_to_nid(zone) == numa_node_id()) {
804                                 __this_cpu_write(p->expire, 0);
805                                 continue;
806                         }
807
808                         if (__this_cpu_dec_return(p->expire))
809                                 continue;
810
811                         if (__this_cpu_read(p->pcp.count)) {
812                                 drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
813                                 changes++;
814                         }
815                 }
816 #endif
817         }
818
819         for_each_online_pgdat(pgdat) {
820                 struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;
821
822                 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
823                         int v;
824
825                         v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
826                         if (v) {
827                                 atomic_long_add(v, &pgdat->vm_stat[i]);
828                                 global_node_diff[i] += v;
829                         }
830                 }
831         }
832
833 #ifdef CONFIG_NUMA
834         changes += fold_diff(global_zone_diff, global_numa_diff,
835                              global_node_diff);
836 #else
837         changes += fold_diff(global_zone_diff, global_node_diff);
838 #endif
839         return changes;
840 }
841
842 /*
843  * Fold the data for an offline cpu into the global array.
844  * There cannot be any access by the offline cpu and therefore
845  * synchronization is simplified.
846  */
847 void cpu_vm_stats_fold(int cpu)
848 {
849         struct pglist_data *pgdat;
850         struct zone *zone;
851         int i;
852         int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
853 #ifdef CONFIG_NUMA
854         int global_numa_diff[NR_VM_NUMA_STAT_ITEMS] = { 0, };
855 #endif
856         int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
857
858         for_each_populated_zone(zone) {
859                 struct per_cpu_pageset *p;
860
861                 p = per_cpu_ptr(zone->pageset, cpu);
862
863                 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
864                         if (p->vm_stat_diff[i]) {
865                                 int v;
866
867                                 v = p->vm_stat_diff[i];
868                                 p->vm_stat_diff[i] = 0;
869                                 atomic_long_add(v, &zone->vm_stat[i]);
870                                 global_zone_diff[i] += v;
871                         }
872
873 #ifdef CONFIG_NUMA
874                 for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
875                         if (p->vm_numa_stat_diff[i]) {
876                                 int v;
877
878                                 v = p->vm_numa_stat_diff[i];
879                                 p->vm_numa_stat_diff[i] = 0;
880                                 atomic_long_add(v, &zone->vm_numa_stat[i]);
881                                 global_numa_diff[i] += v;
882                         }
883 #endif
884         }
885
886         for_each_online_pgdat(pgdat) {
887                 struct per_cpu_nodestat *p;
888
889                 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
890
891                 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
892                         if (p->vm_node_stat_diff[i]) {
893                                 int v;
894
895                                 v = p->vm_node_stat_diff[i];
896                                 p->vm_node_stat_diff[i] = 0;
897                                 atomic_long_add(v, &pgdat->vm_stat[i]);
898                                 global_node_diff[i] += v;
899                         }
900         }
901
902 #ifdef CONFIG_NUMA
903         fold_diff(global_zone_diff, global_numa_diff, global_node_diff);
904 #else
905         fold_diff(global_zone_diff, global_node_diff);
906 #endif
907 }
908
909 /*
910  * This is only called if !populated_zone(zone), which implies no other users of
911  * pset->vm_stat_diff[] exist.
912  */
913 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
914 {
915         int i;
916
917         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
918                 if (pset->vm_stat_diff[i]) {
919                         int v = pset->vm_stat_diff[i];
920                         pset->vm_stat_diff[i] = 0;
921                         atomic_long_add(v, &zone->vm_stat[i]);
922                         atomic_long_add(v, &vm_zone_stat[i]);
923                 }
924
925 #ifdef CONFIG_NUMA
926         for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
927                 if (pset->vm_numa_stat_diff[i]) {
928                         int v = pset->vm_numa_stat_diff[i];
929
930                         pset->vm_numa_stat_diff[i] = 0;
931                         atomic_long_add(v, &zone->vm_numa_stat[i]);
932                         atomic_long_add(v, &vm_numa_stat[i]);
933                 }
934 #endif
935 }
936 #endif
937
938 #ifdef CONFIG_NUMA
939 void __inc_numa_state(struct zone *zone,
940                                  enum numa_stat_item item)
941 {
942         struct per_cpu_pageset __percpu *pcp = zone->pageset;
943         u16 __percpu *p = pcp->vm_numa_stat_diff + item;
944         u16 v;
945
946         v = __this_cpu_inc_return(*p);
947
948         if (unlikely(v > NUMA_STATS_THRESHOLD)) {
949                 zone_numa_state_add(v, zone, item);
950                 __this_cpu_write(*p, 0);
951         }
952 }
953
954 /*
955  * Determine the per node value of a stat item. This function
956  * is called frequently in a NUMA machine, so try to be as
957  * frugal as possible.
958  */
959 unsigned long sum_zone_node_page_state(int node,
960                                  enum zone_stat_item item)
961 {
962         struct zone *zones = NODE_DATA(node)->node_zones;
963         int i;
964         unsigned long count = 0;
965
966         for (i = 0; i < MAX_NR_ZONES; i++)
967                 count += zone_page_state(zones + i, item);
968
969         return count;
970 }
971
972 /*
973  * Determine the per node value of a numa stat item. To avoid deviation,
974  * the per cpu stat number in vm_numa_stat_diff[] is also included.
975  */
976 unsigned long sum_zone_numa_state(int node,
977                                  enum numa_stat_item item)
978 {
979         struct zone *zones = NODE_DATA(node)->node_zones;
980         int i;
981         unsigned long count = 0;
982
983         for (i = 0; i < MAX_NR_ZONES; i++)
984                 count += zone_numa_state_snapshot(zones + i, item);
985
986         return count;
987 }
988
989 /*
990  * Determine the per node value of a stat item.
991  */
992 unsigned long node_page_state(struct pglist_data *pgdat,
993                                 enum node_stat_item item)
994 {
995         long x = atomic_long_read(&pgdat->vm_stat[item]);
996 #ifdef CONFIG_SMP
997         if (x < 0)
998                 x = 0;
999 #endif
1000         return x;
1001 }
1002 #endif
1003
1004 #ifdef CONFIG_COMPACTION
1005
1006 struct contig_page_info {
1007         unsigned long free_pages;
1008         unsigned long free_blocks_total;
1009         unsigned long free_blocks_suitable;
1010 };
1011
1012 /*
1013  * Calculate the number of free pages in a zone, how many contiguous
1014  * pages are free and how many are large enough to satisfy an allocation of
1015  * the target size. Note that this function makes no attempt to estimate
1016  * how many suitable free blocks there *might* be if MOVABLE pages were
1017  * migrated. Calculating that is possible, but expensive and can be
1018  * figured out from userspace
1019  */
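/*
 * Worked example with hypothetical numbers: for suitable_order = 3, a zone
 * holding 64 free order-0 pages and 2 free order-3 blocks yields
 * free_blocks_total = 66, free_pages = 64 + 2 * 8 = 80 and
 * free_blocks_suitable = 2.
 */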
1020 static void fill_contig_page_info(struct zone *zone,
1021                                 unsigned int suitable_order,
1022                                 struct contig_page_info *info)
1023 {
1024         unsigned int order;
1025
1026         info->free_pages = 0;
1027         info->free_blocks_total = 0;
1028         info->free_blocks_suitable = 0;
1029
1030         for (order = 0; order < MAX_ORDER; order++) {
1031                 unsigned long blocks;
1032
1033                 /* Count number of free blocks */
1034                 blocks = zone->free_area[order].nr_free;
1035                 info->free_blocks_total += blocks;
1036
1037                 /* Count free base pages */
1038                 info->free_pages += blocks << order;
1039
1040                 /* Count the suitable free blocks */
1041                 if (order >= suitable_order)
1042                         info->free_blocks_suitable += blocks <<
1043                                                 (order - suitable_order);
1044         }
1045 }
1046
1047 /*
1048  * A fragmentation index only makes sense if an allocation of a requested
1049  * size would fail. If that is true, the fragmentation index indicates
1050  * whether external fragmentation or a lack of memory was the problem.
1051  * The value can be used to determine if page reclaim or compaction
1052  * should be used
1053  */
1054 static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
1055 {
1056         unsigned long requested = 1UL << order;
1057
1058         if (WARN_ON_ONCE(order >= MAX_ORDER))
1059                 return 0;
1060
1061         if (!info->free_blocks_total)
1062                 return 0;
1063
1064         /* Fragmentation index only makes sense when a request would fail */
1065         if (info->free_blocks_suitable)
1066                 return -1000;
1067
1068         /*
1069          * Index is between 0 and 1 so return within 3 decimal places
1070          *
1071          * 0 => allocation would fail due to lack of memory
1072          * 1 => allocation would fail due to fragmentation
1073          */
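	/*
	 * Worked example with hypothetical numbers: an order-3 request
	 * against 64 free pages that are all order-0 gives
	 * 1000 - (1000 + 64000 / 8) / 64 = 1000 - 140 = 860, i.e. close to 1,
	 * so compaction rather than reclaim is indicated.
	 */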
1074         return 1000 - div_u64((1000 + div_u64(info->free_pages * 1000ULL, requested)), info->free_blocks_total);
1075 }
1076
1077 /* Same as __fragmentation index but allocs contig_page_info on stack */
1078 int fragmentation_index(struct zone *zone, unsigned int order)
1079 {
1080         struct contig_page_info info;
1081
1082         fill_contig_page_info(zone, order, &info);
1083         return __fragmentation_index(order, &info);
1084 }
1085 #endif
1086
1087 #if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || \
1088     defined(CONFIG_NUMA) || defined(CONFIG_MEMCG)
1089 #ifdef CONFIG_ZONE_DMA
1090 #define TEXT_FOR_DMA(xx) xx "_dma",
1091 #else
1092 #define TEXT_FOR_DMA(xx)
1093 #endif
1094
1095 #ifdef CONFIG_ZONE_DMA32
1096 #define TEXT_FOR_DMA32(xx) xx "_dma32",
1097 #else
1098 #define TEXT_FOR_DMA32(xx)
1099 #endif
1100
1101 #ifdef CONFIG_HIGHMEM
1102 #define TEXT_FOR_HIGHMEM(xx) xx "_high",
1103 #else
1104 #define TEXT_FOR_HIGHMEM(xx)
1105 #endif
1106
1107 #define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
1108                                         TEXT_FOR_HIGHMEM(xx) xx "_movable",
1109
1110 const char * const vmstat_text[] = {
1111         /* enum zone_stat_item counters */
1112         "nr_free_pages",
1113         "nr_zone_inactive_anon",
1114         "nr_zone_active_anon",
1115         "nr_zone_inactive_file",
1116         "nr_zone_active_file",
1117         "nr_zone_unevictable",
1118         "nr_zone_write_pending",
1119         "nr_mlock",
1120         "nr_page_table_pages",
1121         "nr_kernel_stack",
1122 #if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
1123         "nr_shadow_call_stack",
1124 #endif
1125         "nr_bounce",
1126 #if IS_ENABLED(CONFIG_ZSMALLOC)
1127         "nr_zspages",
1128 #endif
1129         "nr_free_cma",
1130
1131         /* enum numa_stat_item counters */
1132 #ifdef CONFIG_NUMA
1133         "numa_hit",
1134         "numa_miss",
1135         "numa_foreign",
1136         "numa_interleave",
1137         "numa_local",
1138         "numa_other",
1139 #endif
1140
1141         /* enum node_stat_item counters */
1142         "nr_inactive_anon",
1143         "nr_active_anon",
1144         "nr_inactive_file",
1145         "nr_active_file",
1146         "nr_unevictable",
1147         "nr_slab_reclaimable",
1148         "nr_slab_unreclaimable",
1149         "nr_isolated_anon",
1150         "nr_isolated_file",
1151         "workingset_nodes",
1152         "workingset_refault",
1153         "workingset_activate",
1154         "workingset_restore",
1155         "workingset_nodereclaim",
1156         "nr_anon_pages",
1157         "nr_mapped",
1158         "nr_file_pages",
1159         "nr_dirty",
1160         "nr_writeback",
1161         "nr_writeback_temp",
1162         "nr_shmem",
1163         "nr_shmem_hugepages",
1164         "nr_shmem_pmdmapped",
1165         "nr_file_hugepages",
1166         "nr_file_pmdmapped",
1167         "nr_anon_transparent_hugepages",
1168         "nr_vmscan_write",
1169         "nr_vmscan_immediate_reclaim",
1170         "nr_dirtied",
1171         "nr_written",
1172         "nr_kernel_misc_reclaimable",
1173         "nr_foll_pin_acquired",
1174         "nr_foll_pin_released",
1175
1176         /* enum writeback_stat_item counters */
1177         "nr_dirty_threshold",
1178         "nr_dirty_background_threshold",
1179
1180 #if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
1181         /* enum vm_event_item counters */
1182         "pgpgin",
1183         "pgpgout",
1184         "pswpin",
1185         "pswpout",
1186
1187         TEXTS_FOR_ZONES("pgalloc")
1188         TEXTS_FOR_ZONES("allocstall")
1189         TEXTS_FOR_ZONES("pgskip")
1190
1191         "pgfree",
1192         "pgactivate",
1193         "pgdeactivate",
1194         "pglazyfree",
1195
1196         "pgfault",
1197         "pgmajfault",
1198         "pglazyfreed",
1199
1200         "pgrefill",
1201         "pgsteal_kswapd",
1202         "pgsteal_direct",
1203         "pgscan_kswapd",
1204         "pgscan_direct",
1205         "pgscan_direct_throttle",
1206         "pgscan_anon",
1207         "pgscan_file",
1208         "pgsteal_anon",
1209         "pgsteal_file",
1210
1211 #ifdef CONFIG_NUMA
1212         "zone_reclaim_failed",
1213 #endif
1214         "pginodesteal",
1215         "slabs_scanned",
1216         "kswapd_inodesteal",
1217         "kswapd_low_wmark_hit_quickly",
1218         "kswapd_high_wmark_hit_quickly",
1219         "pageoutrun",
1220
1221         "pgrotated",
1222
1223         "drop_pagecache",
1224         "drop_slab",
1225         "oom_kill",
1226
1227 #ifdef CONFIG_NUMA_BALANCING
1228         "numa_pte_updates",
1229         "numa_huge_pte_updates",
1230         "numa_hint_faults",
1231         "numa_hint_faults_local",
1232         "numa_pages_migrated",
1233 #endif
1234 #ifdef CONFIG_MIGRATION
1235         "pgmigrate_success",
1236         "pgmigrate_fail",
1237 #endif
1238 #ifdef CONFIG_COMPACTION
1239         "compact_migrate_scanned",
1240         "compact_free_scanned",
1241         "compact_isolated",
1242         "compact_stall",
1243         "compact_fail",
1244         "compact_success",
1245         "compact_daemon_wake",
1246         "compact_daemon_migrate_scanned",
1247         "compact_daemon_free_scanned",
1248 #endif
1249
1250 #ifdef CONFIG_HUGETLB_PAGE
1251         "htlb_buddy_alloc_success",
1252         "htlb_buddy_alloc_fail",
1253 #endif
1254         "unevictable_pgs_culled",
1255         "unevictable_pgs_scanned",
1256         "unevictable_pgs_rescued",
1257         "unevictable_pgs_mlocked",
1258         "unevictable_pgs_munlocked",
1259         "unevictable_pgs_cleared",
1260         "unevictable_pgs_stranded",
1261
1262 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1263         "thp_fault_alloc",
1264         "thp_fault_fallback",
1265         "thp_fault_fallback_charge",
1266         "thp_collapse_alloc",
1267         "thp_collapse_alloc_failed",
1268         "thp_file_alloc",
1269         "thp_file_fallback",
1270         "thp_file_fallback_charge",
1271         "thp_file_mapped",
1272         "thp_split_page",
1273         "thp_split_page_failed",
1274         "thp_deferred_split_page",
1275         "thp_split_pmd",
1276 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1277         "thp_split_pud",
1278 #endif
1279         "thp_zero_page_alloc",
1280         "thp_zero_page_alloc_failed",
1281         "thp_swpout",
1282         "thp_swpout_fallback",
1283 #endif
1284 #ifdef CONFIG_MEMORY_BALLOON
1285         "balloon_inflate",
1286         "balloon_deflate",
1287 #ifdef CONFIG_BALLOON_COMPACTION
1288         "balloon_migrate",
1289 #endif
1290 #endif /* CONFIG_MEMORY_BALLOON */
1291 #ifdef CONFIG_DEBUG_TLBFLUSH
1292         "nr_tlb_remote_flush",
1293         "nr_tlb_remote_flush_received",
1294         "nr_tlb_local_flush_all",
1295         "nr_tlb_local_flush_one",
1296 #endif /* CONFIG_DEBUG_TLBFLUSH */
1297
1298 #ifdef CONFIG_DEBUG_VM_VMACACHE
1299         "vmacache_find_calls",
1300         "vmacache_find_hits",
1301 #endif
1302 #ifdef CONFIG_SWAP
1303         "swap_ra",
1304         "swap_ra_hit",
1305 #endif
1306 #endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
1307 };
1308 #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */
1309
1310 #if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
1311      defined(CONFIG_PROC_FS)
1312 static void *frag_start(struct seq_file *m, loff_t *pos)
1313 {
1314         pg_data_t *pgdat;
1315         loff_t node = *pos;
1316
1317         for (pgdat = first_online_pgdat();
1318              pgdat && node;
1319              pgdat = next_online_pgdat(pgdat))
1320                 --node;
1321
1322         return pgdat;
1323 }
1324
1325 static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
1326 {
1327         pg_data_t *pgdat = (pg_data_t *)arg;
1328
1329         (*pos)++;
1330         return next_online_pgdat(pgdat);
1331 }
1332
1333 static void frag_stop(struct seq_file *m, void *arg)
1334 {
1335 }
1336
1337 /*
1338  * Walk zones in a node and print using a callback.
1339  * If @assert_populated is true, only use callback for zones that are populated.
1340  */
1341 static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
1342                 bool assert_populated, bool nolock,
1343                 void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
1344 {
1345         struct zone *zone;
1346         struct zone *node_zones = pgdat->node_zones;
1347         unsigned long flags;
1348
1349         for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
1350                 if (assert_populated && !populated_zone(zone))
1351                         continue;
1352
1353                 if (!nolock)
1354                         spin_lock_irqsave(&zone->lock, flags);
1355                 print(m, pgdat, zone);
1356                 if (!nolock)
1357                         spin_unlock_irqrestore(&zone->lock, flags);
1358         }
1359 }
1360 #endif
1361
1362 #ifdef CONFIG_PROC_FS
1363 static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
1364                                                 struct zone *zone)
1365 {
1366         int order;
1367
1368         seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1369         for (order = 0; order < MAX_ORDER; ++order)
1370                 seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
1371         seq_putc(m, '\n');
1372 }
1373
1374 /*
1375  * This walks the free areas for each zone.
1376  */
1377 static int frag_show(struct seq_file *m, void *arg)
1378 {
1379         pg_data_t *pgdat = (pg_data_t *)arg;
1380         walk_zones_in_node(m, pgdat, true, false, frag_show_print);
1381         return 0;
1382 }
1383
1384 static void pagetypeinfo_showfree_print(struct seq_file *m,
1385                                         pg_data_t *pgdat, struct zone *zone)
1386 {
1387         int order, mtype;
1388
1389         for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
1390                 seq_printf(m, "Node %4d, zone %8s, type %12s ",
1391                                         pgdat->node_id,
1392                                         zone->name,
1393                                         migratetype_names[mtype]);
1394                 for (order = 0; order < MAX_ORDER; ++order) {
1395                         unsigned long freecount = 0;
1396                         struct free_area *area;
1397                         struct list_head *curr;
1398                         bool overflow = false;
1399
1400                         area = &(zone->free_area[order]);
1401
1402                         list_for_each(curr, &area->free_list[mtype]) {
1403                                 /*
1404                                  * Cap the free_list iteration because it might
1405                                  * be really large and we are under a spinlock
1406                                  * so a long time spent here could trigger a
1407                                  * hard lockup detector. Anyway this is a
1408                                  * debugging tool so knowing there is a handful
1409                                  * of pages of this order should be more than
1410                                  * sufficient.
1411                                  */
1412                                 if (++freecount >= 100000) {
1413                                         overflow = true;
1414                                         break;
1415                                 }
1416                         }
1417                         seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
1418                         spin_unlock_irq(&zone->lock);
1419                         cond_resched();
1420                         spin_lock_irq(&zone->lock);
1421                 }
1422                 seq_putc(m, '\n');
1423         }
1424 }
1425
1426 /* Print out the free pages at each order for each migratetype */
1427 static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
1428 {
1429         int order;
1430         pg_data_t *pgdat = (pg_data_t *)arg;
1431
1432         /* Print header */
1433         seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
1434         for (order = 0; order < MAX_ORDER; ++order)
1435                 seq_printf(m, "%6d ", order);
1436         seq_putc(m, '\n');
1437
1438         walk_zones_in_node(m, pgdat, true, false, pagetypeinfo_showfree_print);
1439
1440         return 0;
1441 }
1442
1443 static void pagetypeinfo_showblockcount_print(struct seq_file *m,
1444                                         pg_data_t *pgdat, struct zone *zone)
1445 {
1446         int mtype;
1447         unsigned long pfn;
1448         unsigned long start_pfn = zone->zone_start_pfn;
1449         unsigned long end_pfn = zone_end_pfn(zone);
1450         unsigned long count[MIGRATE_TYPES] = { 0, };
1451
1452         for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
1453                 struct page *page;
1454
1455                 page = pfn_to_online_page(pfn);
1456                 if (!page)
1457                         continue;
1458
1459                 /* Watch for unexpected holes punched in the memmap */
1460                 if (!memmap_valid_within(pfn, page, zone))
1461                         continue;
1462
1463                 if (page_zone(page) != zone)
1464                         continue;
1465
1466                 mtype = get_pageblock_migratetype(page);
1467
1468                 if (mtype < MIGRATE_TYPES)
1469                         count[mtype]++;
1470         }
1471
1472         /* Print counts */
1473         seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1474         for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1475                 seq_printf(m, "%12lu ", count[mtype]);
1476         seq_putc(m, '\n');
1477 }
1478
1479 /* Print out the number of pageblocks for each migratetype */
1480 static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
1481 {
1482         int mtype;
1483         pg_data_t *pgdat = (pg_data_t *)arg;
1484
1485         seq_printf(m, "\n%-23s", "Number of blocks type ");
1486         for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1487                 seq_printf(m, "%12s ", migratetype_names[mtype]);
1488         seq_putc(m, '\n');
1489         walk_zones_in_node(m, pgdat, true, false,
1490                 pagetypeinfo_showblockcount_print);
1491
1492         return 0;
1493 }
1494
1495 /*
1496  * Print out the number of pageblocks for each migratetype that contain pages
1497  * of other types. This gives an indication of how well fallbacks are being
1498  * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
1499  * to determine what is going on
1500  */
1501 static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
1502 {
1503 #ifdef CONFIG_PAGE_OWNER
1504         int mtype;
1505
1506         if (!static_branch_unlikely(&page_owner_inited))
1507                 return;
1508
1509         drain_all_pages(NULL);
1510
1511         seq_printf(m, "\n%-23s", "Number of mixed blocks ");
1512         for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1513                 seq_printf(m, "%12s ", migratetype_names[mtype]);
1514         seq_putc(m, '\n');
1515
1516         walk_zones_in_node(m, pgdat, true, true,
1517                 pagetypeinfo_showmixedcount_print);
1518 #endif /* CONFIG_PAGE_OWNER */
1519 }
1520
1521 /*
1522  * This prints out statistics in relation to grouping pages by mobility.
1523  * It is expensive to collect so do not constantly read the file.
1524  */
1525 static int pagetypeinfo_show(struct seq_file *m, void *arg)
1526 {
1527         pg_data_t *pgdat = (pg_data_t *)arg;
1528
1529         /* check memoryless node */
1530         if (!node_state(pgdat->node_id, N_MEMORY))
1531                 return 0;
1532
1533         seq_printf(m, "Page block order: %d\n", pageblock_order);
1534         seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
1535         seq_putc(m, '\n');
1536         pagetypeinfo_showfree(m, pgdat);
1537         pagetypeinfo_showblockcount(m, pgdat);
1538         pagetypeinfo_showmixedcount(m, pgdat);
1539
1540         return 0;
1541 }
1542
1543 static const struct seq_operations fragmentation_op = {
1544         .start  = frag_start,
1545         .next   = frag_next,
1546         .stop   = frag_stop,
1547         .show   = frag_show,
1548 };
1549
1550 static const struct seq_operations pagetypeinfo_op = {
1551         .start  = frag_start,
1552         .next   = frag_next,
1553         .stop   = frag_stop,
1554         .show   = pagetypeinfo_show,
1555 };
1556
1557 static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
1558 {
1559         int zid;
1560
1561         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1562                 struct zone *compare = &pgdat->node_zones[zid];
1563
1564                 if (populated_zone(compare))
1565                         return zone == compare;
1566         }
1567
1568         return false;
1569 }
1570
1571 static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
1572                                                         struct zone *zone)
1573 {
1574         int i;
1575         seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
1576         if (is_zone_first_populated(pgdat, zone)) {
1577                 seq_printf(m, "\n  per-node stats");
1578                 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
1579                         seq_printf(m, "\n      %-12s %lu", node_stat_name(i),
1580                                    node_page_state(pgdat, i));
1581                 }
1582         }
1583         seq_printf(m,
1584                    "\n  pages free     %lu"
1585                    "\n        min      %lu"
1586                    "\n        low      %lu"
1587                    "\n        high     %lu"
1588                    "\n        spanned  %lu"
1589                    "\n        present  %lu"
1590                    "\n        managed  %lu",
1591                    zone_page_state(zone, NR_FREE_PAGES),
1592                    min_wmark_pages(zone),
1593                    low_wmark_pages(zone),
1594                    high_wmark_pages(zone),
1595                    zone->spanned_pages,
1596                    zone->present_pages,
1597                    zone_managed_pages(zone));
1598
1599         /* If unpopulated, no other information is useful */
1600         if (!populated_zone(zone)) {
1601                 seq_putc(m, '\n');
1602                 return;
1603         }
1604
1605         seq_printf(m,
1606                    "\n        protection: (%ld",
1607                    zone->lowmem_reserve[0]);
1608         for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
1609                 seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
1610         seq_putc(m, ')');
1611
1612         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1613                 seq_printf(m, "\n      %-12s %lu", zone_stat_name(i),
1614                            zone_page_state(zone, i));
1615
1616 #ifdef CONFIG_NUMA
1617         for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
1618                 seq_printf(m, "\n      %-12s %lu", numa_stat_name(i),
1619                            zone_numa_state_snapshot(zone, i));
1620 #endif
1621
1622         seq_printf(m, "\n  pagesets");
1623         for_each_online_cpu(i) {
1624                 struct per_cpu_pageset *pageset;
1625
1626                 pageset = per_cpu_ptr(zone->pageset, i);
1627                 seq_printf(m,
1628                            "\n    cpu: %i"
1629                            "\n              count: %i"
1630                            "\n              high:  %i"
1631                            "\n              batch: %i",
1632                            i,
1633                            pageset->pcp.count,
1634                            pageset->pcp.high,
1635                            pageset->pcp.batch);
1636 #ifdef CONFIG_SMP
1637                 seq_printf(m, "\n  vm stats threshold: %d",
1638                                 pageset->stat_threshold);
1639 #endif
1640         }
1641         seq_printf(m,
1642                    "\n  node_unreclaimable:  %u"
1643                    "\n  start_pfn:           %lu",
1644                    pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES,
1645                    zone->zone_start_pfn);
1646         seq_putc(m, '\n');
1647 }
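/*
 * Rough sketch of the per-zone block the above produces in /proc/zoneinfo
 * (numbers are made up for illustration):
 *
 *   Node 0, zone   Normal
 *     pages free     12345
 *           min      1024
 *           low      1280
 *           high     1536
 *           ...
 *           protection: (0, 0, 0, 0)
 *     ...
 */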
1648
1649 /*
1650  * Output information about zones in @pgdat.  All zones are printed regardless
1651  * of whether they are populated or not: lowmem_reserve_ratio operates on the
1652  * set of all zones and userspace would not be aware of such zones if they are
1653  * suppressed here (zoneinfo displays the effect of lowmem_reserve_ratio).
1654  */
1655 static int zoneinfo_show(struct seq_file *m, void *arg)
1656 {
1657         pg_data_t *pgdat = (pg_data_t *)arg;
1658         walk_zones_in_node(m, pgdat, false, false, zoneinfo_show_print);
1659         return 0;
1660 }
1661
1662 static const struct seq_operations zoneinfo_op = {
1663         .start  = frag_start, /* iterate over all zones. The same as in
1664                                * fragmentation. */
1665         .next   = frag_next,
1666         .stop   = frag_stop,
1667         .show   = zoneinfo_show,
1668 };
1669
1670 #define NR_VMSTAT_ITEMS (NR_VM_ZONE_STAT_ITEMS + \
1671                          NR_VM_NUMA_STAT_ITEMS + \
1672                          NR_VM_NODE_STAT_ITEMS + \
1673                          NR_VM_WRITEBACK_STAT_ITEMS + \
1674                          (IS_ENABLED(CONFIG_VM_EVENT_COUNTERS) ? \
1675                           NR_VM_EVENT_ITEMS : 0))
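/*
 * The snapshot built below lands in /proc/vmstat in this order: zone
 * counters, NUMA counters (when present), node counters, the writeback
 * thresholds and, when CONFIG_VM_EVENT_COUNTERS is enabled, the VM event
 * counters.
 */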
1676
1677 static void *vmstat_start(struct seq_file *m, loff_t *pos)
1678 {
1679         unsigned long *v;
1680         int i;
1681
1682         if (*pos >= NR_VMSTAT_ITEMS)
1683                 return NULL;
1684
1685         BUILD_BUG_ON(ARRAY_SIZE(vmstat_text) < NR_VMSTAT_ITEMS);
1686         v = kmalloc_array(NR_VMSTAT_ITEMS, sizeof(unsigned long), GFP_KERNEL);
1687         m->private = v;
1688         if (!v)
1689                 return ERR_PTR(-ENOMEM);
1690         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1691                 v[i] = global_zone_page_state(i);
1692         v += NR_VM_ZONE_STAT_ITEMS;
1693
1694 #ifdef CONFIG_NUMA
1695         for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
1696                 v[i] = global_numa_state(i);
1697         v += NR_VM_NUMA_STAT_ITEMS;
1698 #endif
1699
1700         for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
1701                 v[i] = global_node_page_state(i);
1702         v += NR_VM_NODE_STAT_ITEMS;
1703
1704         global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
1705                             v + NR_DIRTY_THRESHOLD);
1706         v += NR_VM_WRITEBACK_STAT_ITEMS;
1707
1708 #ifdef CONFIG_VM_EVENT_COUNTERS
1709         all_vm_events(v);
1710         v[PGPGIN] /= 2;         /* sectors -> kbytes */
1711         v[PGPGOUT] /= 2;
1712 #endif
1713         return (unsigned long *)m->private + *pos;
1714 }
1715
1716 static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1717 {
1718         (*pos)++;
1719         if (*pos >= NR_VMSTAT_ITEMS)
1720                 return NULL;
1721         return (unsigned long *)m->private + *pos;
1722 }
1723
1724 static int vmstat_show(struct seq_file *m, void *arg)
1725 {
1726         unsigned long *l = arg;
1727         unsigned long off = l - (unsigned long *)m->private;
1728
1729         seq_puts(m, vmstat_text[off]);
1730         seq_put_decimal_ull(m, " ", *l);
1731         seq_putc(m, '\n');
1732
1733         if (off == NR_VMSTAT_ITEMS - 1) {
1734                 /*
1735                  * We've come to the end - add any deprecated counters to avoid
1736                  * breaking userspace which might depend on them being present.
1737                  */
1738                 seq_puts(m, "nr_unstable 0\n");
1739         }
1740         return 0;
1741 }
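/*
 * Each line of /proc/vmstat therefore looks like "<name> <value>", e.g.
 * "nr_free_pages 123456" (value made up). A minimal userspace sketch that
 * dumps the file, assuming procfs is mounted at /proc (illustration only,
 * not kernel code):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[128];
 *		FILE *f = fopen("/proc/vmstat", "r");
 *
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);
 *		fclose(f);
 *		return 0;
 *	}
 */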
1742
1743 static void vmstat_stop(struct seq_file *m, void *arg)
1744 {
1745         kfree(m->private);
1746         m->private = NULL;
1747 }
1748
1749 static const struct seq_operations vmstat_op = {
1750         .start  = vmstat_start,
1751         .next   = vmstat_next,
1752         .stop   = vmstat_stop,
1753         .show   = vmstat_show,
1754 };
1755 #endif /* CONFIG_PROC_FS */
1756
1757 #ifdef CONFIG_SMP
1758 static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
1759 int sysctl_stat_interval __read_mostly = HZ;
1760
1761 #ifdef CONFIG_PROC_FS
1762 static void refresh_vm_stats(struct work_struct *work)
1763 {
1764         refresh_cpu_vm_stats(true);
1765 }
1766
1767 int vmstat_refresh(struct ctl_table *table, int write,
1768                    void *buffer, size_t *lenp, loff_t *ppos)
1769 {
1770         long val;
1771         int err;
1772         int i;
1773
1774         /*
1775          * The regular update, every sysctl_stat_interval, may come later
1776          * than expected, leaving a significant amount in the per-CPU buckets.
1777          * This is particularly misleading when checking the number of huge
1778          * pages immediately after running a test.  /proc/sys/vm/stat_refresh,
1779          * which can equally be written to or read from (by root),
1780          * can be used to update the stats just before reading them.
1781          *
1782          * Since global_zone_page_state() etc. are careful to hide transiently
1783          * negative values, report an error here if any of the stats is
1784          * negative, so we know to go looking for an imbalance.
1785          */
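        /*
         * Illustrative use from userspace (run as root; paths are the
         * standard procfs locations):
         *
         *	echo 1 > /proc/sys/vm/stat_refresh
         *	cat /proc/vmstat
         */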
1786         err = schedule_on_each_cpu(refresh_vm_stats);
1787         if (err)
1788                 return err;
1789         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
1790                 val = atomic_long_read(&vm_zone_stat[i]);
1791                 if (val < 0) {
1792                         pr_warn("%s: %s %ld\n",
1793                                 __func__, zone_stat_name(i), val);
1794                         err = -EINVAL;
1795                 }
1796         }
1797 #ifdef CONFIG_NUMA
1798         for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++) {
1799                 val = atomic_long_read(&vm_numa_stat[i]);
1800                 if (val < 0) {
1801                         pr_warn("%s: %s %ld\n",
1802                                 __func__, numa_stat_name(i), val);
1803                         err = -EINVAL;
1804                 }
1805         }
1806 #endif
1807         if (err)
1808                 return err;
1809         if (write)
1810                 *ppos += *lenp;
1811         else
1812                 *lenp = 0;
1813         return 0;
1814 }
1815 #endif /* CONFIG_PROC_FS */
1816
1817 static void vmstat_update(struct work_struct *w)
1818 {
1819         if (refresh_cpu_vm_stats(true)) {
1820                 /*
1821                  * Counters were updated so we expect more updates
1822                  * to occur in the future. Keep on running the
1823                  * update worker thread.
1824                  */
1825                 queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
1826                                 this_cpu_ptr(&vmstat_work),
1827                                 round_jiffies_relative(sysctl_stat_interval));
1828         }
1829 }
1830
1836 /* Check if the diffs for a certain cpu indicate that an update is needed. */
1840 static bool need_update(int cpu)
1841 {
1842         struct zone *zone;
1843
1844         for_each_populated_zone(zone) {
1845                 struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);
1846
1847                 BUILD_BUG_ON(sizeof(p->vm_stat_diff[0]) != 1);
1848 #ifdef CONFIG_NUMA
1849                 BUILD_BUG_ON(sizeof(p->vm_numa_stat_diff[0]) != 2);
1850 #endif
1851
1852                 /*
1853                  * The fast way of checking if there are any vmstat diffs.
1854                  */
1855                 if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS *
1856                                sizeof(p->vm_stat_diff[0])))
1857                         return true;
1858 #ifdef CONFIG_NUMA
1859                 if (memchr_inv(p->vm_numa_stat_diff, 0, NR_VM_NUMA_STAT_ITEMS *
1860                                sizeof(p->vm_numa_stat_diff[0])))
1861                         return true;
1862 #endif
1863         }
1864         return false;
1865 }
1866
1867 /*
1868  * Switch off vmstat processing and then fold all the remaining differentials
1869  * until the diffs stay at zero. The function is used by NOHZ and can only be
1870  * invoked when tick processing is not active.
1871  */
1872 void quiet_vmstat(void)
1873 {
1874         if (system_state != SYSTEM_RUNNING)
1875                 return;
1876
1877         if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
1878                 return;
1879
1880         if (!need_update(smp_processor_id()))
1881                 return;
1882
1883         /*
1884          * Just refresh the counters and do not care about the pending delayed
1885          * vmstat_update; it does not fire often enough to matter, and
1886          * cancelling it from this path would be too expensive.
1887          * vmstat_shepherd will take care of that for us.
1888          */
1889         refresh_cpu_vm_stats(false);
1890 }
1891
1892 /*
1893  * Shepherd worker that checks the differentials of processors whose
1894  * vmstat update workers have been disabled because of inactivity, and
1895  * requeues them when an update is needed.
1897  */
1898 static void vmstat_shepherd(struct work_struct *w);
1899
1900 static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
1901
1902 static void vmstat_shepherd(struct work_struct *w)
1903 {
1904         int cpu;
1905
1906         get_online_cpus();
1907         /* Check processors whose vmstat worker threads have been disabled */
1908         for_each_online_cpu(cpu) {
1909                 struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
1910
1911                 if (!delayed_work_pending(dw) && need_update(cpu))
1912                         queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
1913         }
1914         put_online_cpus();
1915
1916         schedule_delayed_work(&shepherd,
1917                 round_jiffies_relative(sysctl_stat_interval));
1918 }
1919
1920 static void __init start_shepherd_timer(void)
1921 {
1922         int cpu;
1923
1924         for_each_possible_cpu(cpu)
1925                 INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
1926                         vmstat_update);
1927
1928         schedule_delayed_work(&shepherd,
1929                 round_jiffies_relative(sysctl_stat_interval));
1930 }
1931
1932 static void __init init_cpu_node_state(void)
1933 {
1934         int node;
1935
1936         for_each_online_node(node) {
1937                 if (cpumask_weight(cpumask_of_node(node)) > 0)
1938                         node_set_state(node, N_CPU);
1939         }
1940 }
1941
1942 static int vmstat_cpu_online(unsigned int cpu)
1943 {
1944         refresh_zone_stat_thresholds();
1945         node_set_state(cpu_to_node(cpu), N_CPU);
1946         return 0;
1947 }
1948
1949 static int vmstat_cpu_down_prep(unsigned int cpu)
1950 {
1951         cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
1952         return 0;
1953 }
1954
1955 static int vmstat_cpu_dead(unsigned int cpu)
1956 {
1957         const struct cpumask *node_cpus;
1958         int node;
1959
1960         node = cpu_to_node(cpu);
1961
1962         refresh_zone_stat_thresholds();
1963         node_cpus = cpumask_of_node(node);
1964         if (cpumask_weight(node_cpus) > 0)
1965                 return 0;
1966
1967         node_clear_state(node, N_CPU);
1968         return 0;
1969 }
1970
1971 #endif
1972
1973 struct workqueue_struct *mm_percpu_wq;
1974
1975 void __init init_mm_internals(void)
1976 {
1977         int ret __maybe_unused;
1978
1979         mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);
1980
1981 #ifdef CONFIG_SMP
1982         ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
1983                                         NULL, vmstat_cpu_dead);
1984         if (ret < 0)
1985                 pr_err("vmstat: failed to register 'dead' hotplug state\n");
1986
1987         ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online",
1988                                         vmstat_cpu_online,
1989                                         vmstat_cpu_down_prep);
1990         if (ret < 0)
1991                 pr_err("vmstat: failed to register 'online' hotplug state\n");
1992
1993         get_online_cpus();
1994         init_cpu_node_state();
1995         put_online_cpus();
1996
1997         start_shepherd_timer();
1998 #endif
1999 #ifdef CONFIG_PROC_FS
2000         proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op);
2001         proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op);
2002         proc_create_seq("vmstat", 0444, NULL, &vmstat_op);
2003         proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op);
2004 #endif
2005 }
2006
2007 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
2008
2009 /*
2010  * Return an index indicating how much of the available free memory is
2011  * unusable for an allocation of the requested size.
2012  */
2013 static int unusable_free_index(unsigned int order,
2014                                 struct contig_page_info *info)
2015 {
2016         /* Having no free memory is treated as all free memory being unusable */
2017         if (info->free_pages == 0)
2018                 return 1000;
2019
2020         /*
2021          * The index is conceptually a value between 0 and 1; return it
2022          * scaled by 1000, i.e. to three decimal places.
2023          *
2024          * 0 => no fragmentation
2025          * 1 => high fragmentation
2026          */
2027         return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
2028
2029 }
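/*
 * Worked example with made-up numbers: for order = 2, free_pages = 1000 and
 * free_blocks_suitable = 200, the suitable blocks cover 200 << 2 = 800 of
 * those pages, so the index is (1000 - 800) * 1000 / 1000 = 200, which
 * unusable_show_print() below renders as 0.200.
 */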
2030
2031 static void unusable_show_print(struct seq_file *m,
2032                                         pg_data_t *pgdat, struct zone *zone)
2033 {
2034         unsigned int order;
2035         int index;
2036         struct contig_page_info info;
2037
2038         seq_printf(m, "Node %d, zone %8s ",
2039                                 pgdat->node_id,
2040                                 zone->name);
2041         for (order = 0; order < MAX_ORDER; ++order) {
2042                 fill_contig_page_info(zone, order, &info);
2043                 index = unusable_free_index(order, &info);
2044                 seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
2045         }
2046
2047         seq_putc(m, '\n');
2048 }
2049
2050 /*
2051  * Display unusable free space index
2052  *
2053  * The unusable free space index measures how much of the available free
2054  * memory cannot be used to satisfy an allocation of a given size and is a
2055  * value between 0 and 1. The higher the value, the more of the free memory
2056  * is unusable and, by implication, the worse the external fragmentation is.
2057  * This can be expressed as a percentage by multiplying by 100.
2058  */
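/*
 * For example, a reported value of 0.250 at a given order means that roughly
 * 25% of the currently free memory cannot be used to satisfy an allocation
 * of that order.
 */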
2059 static int unusable_show(struct seq_file *m, void *arg)
2060 {
2061         pg_data_t *pgdat = (pg_data_t *)arg;
2062
2063         /* check memoryless node */
2064         if (!node_state(pgdat->node_id, N_MEMORY))
2065                 return 0;
2066
2067         walk_zones_in_node(m, pgdat, true, false, unusable_show_print);
2068
2069         return 0;
2070 }
2071
2072 static const struct seq_operations unusable_sops = {
2073         .start  = frag_start,
2074         .next   = frag_next,
2075         .stop   = frag_stop,
2076         .show   = unusable_show,
2077 };
2078
2079 DEFINE_SEQ_ATTRIBUTE(unusable);
2080
2081 static void extfrag_show_print(struct seq_file *m,
2082                                         pg_data_t *pgdat, struct zone *zone)
2083 {
2084         unsigned int order;
2085         int index;
2086
2087         /* Alloc on stack as interrupts are disabled for zone walk */
2088         struct contig_page_info info;
2089
2090         seq_printf(m, "Node %d, zone %8s ",
2091                                 pgdat->node_id,
2092                                 zone->name);
2093         for (order = 0; order < MAX_ORDER; ++order) {
2094                 fill_contig_page_info(zone, order, &info);
2095                 index = __fragmentation_index(order, &info);
2096                 seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
2097         }
2098
2099         seq_putc(m, '\n');
2100 }
2101
2102 /*
2103  * Display the fragmentation index for orders at which allocations would fail
2104  */
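/*
 * As with the fragmentation index used elsewhere in this file, values are
 * printed to three decimal places; a value tending towards 0 implies the
 * allocation would fail due to lack of memory, while a value tending towards
 * 1 implies the failure would be due to external fragmentation.
 */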
2105 static int extfrag_show(struct seq_file *m, void *arg)
2106 {
2107         pg_data_t *pgdat = (pg_data_t *)arg;
2108
2109         walk_zones_in_node(m, pgdat, true, false, extfrag_show_print);
2110
2111         return 0;
2112 }
2113
2114 static const struct seq_operations extfrag_sops = {
2115         .start  = frag_start,
2116         .next   = frag_next,
2117         .stop   = frag_stop,
2118         .show   = extfrag_show,
2119 };
2120
2121 DEFINE_SEQ_ATTRIBUTE(extfrag);
2122
2123 static int __init extfrag_debug_init(void)
2124 {
2125         struct dentry *extfrag_debug_root;
2126
2127         extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
2128
2129         debugfs_create_file("unusable_index", 0444, extfrag_debug_root, NULL,
2130                             &unusable_fops);
2131
2132         debugfs_create_file("extfrag_index", 0444, extfrag_debug_root, NULL,
2133                             &extfrag_fops);
2134
2135         return 0;
2136 }
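/*
 * With debugfs mounted at the usual /sys/kernel/debug, the two files can be
 * read directly, e.g.:
 *
 *	cat /sys/kernel/debug/extfrag/unusable_index
 *	cat /sys/kernel/debug/extfrag/extfrag_index
 */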
2137
2138 module_init(extfrag_debug_init);
2139 #endif