/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/node.h>
#include <linux/mmzone.h>
#include <linux/compaction.h>
/*
 * The order of these masks is important. Matching masks will be seen
 * first and the leftover flags will end up showing by themselves.
 *
 * For example, if we have GFP_KERNEL before GFP_USER we will get:
 *
 * GFP_KERNEL|__GFP_HARDWALL
 *
 * Thus most bits set go first.
 */
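/*
 * Illustrative example (not part of the original comment): with the
 * current gfp.h definitions, GFP_KERNEL is __GFP_RECLAIM|__GFP_IO|__GFP_FS
 * and GFP_USER is GFP_KERNEL plus __GFP_HARDWALL.  If GFP_KERNEL were
 * listed first, __print_flags() would match its bits first and render a
 * GFP_USER allocation as "GFP_KERNEL|__GFP_HARDWALL"; listing the larger
 * composite mask GFP_USER first yields the more readable "GFP_USER".
 */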
#define __def_gfpflag_names \
	{(unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \
	{(unsigned long)GFP_TRANSHUGE_LIGHT, "GFP_TRANSHUGE_LIGHT"}, \
	{(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \
	{(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \
	{(unsigned long)GFP_USER, "GFP_USER"}, \
	{(unsigned long)GFP_KERNEL_ACCOUNT, "GFP_KERNEL_ACCOUNT"}, \
	{(unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \
	{(unsigned long)GFP_NOFS, "GFP_NOFS"}, \
	{(unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \
	{(unsigned long)GFP_NOIO, "GFP_NOIO"}, \
	{(unsigned long)GFP_NOWAIT, "GFP_NOWAIT"}, \
	{(unsigned long)GFP_DMA, "GFP_DMA"}, \
	{(unsigned long)__GFP_HIGHMEM, "__GFP_HIGHMEM"}, \
	{(unsigned long)GFP_DMA32, "GFP_DMA32"}, \
	{(unsigned long)__GFP_HIGH, "__GFP_HIGH"}, \
	{(unsigned long)__GFP_ATOMIC, "__GFP_ATOMIC"}, \
	{(unsigned long)__GFP_IO, "__GFP_IO"}, \
	{(unsigned long)__GFP_FS, "__GFP_FS"}, \
	{(unsigned long)__GFP_NOWARN, "__GFP_NOWARN"}, \
	{(unsigned long)__GFP_RETRY_MAYFAIL, "__GFP_RETRY_MAYFAIL"}, \
	{(unsigned long)__GFP_NOFAIL, "__GFP_NOFAIL"}, \
	{(unsigned long)__GFP_NORETRY, "__GFP_NORETRY"}, \
	{(unsigned long)__GFP_COMP, "__GFP_COMP"}, \
	{(unsigned long)__GFP_ZERO, "__GFP_ZERO"}, \
	{(unsigned long)__GFP_NOMEMALLOC, "__GFP_NOMEMALLOC"}, \
	{(unsigned long)__GFP_MEMALLOC, "__GFP_MEMALLOC"}, \
	{(unsigned long)__GFP_HARDWALL, "__GFP_HARDWALL"}, \
	{(unsigned long)__GFP_THISNODE, "__GFP_THISNODE"}, \
	{(unsigned long)__GFP_RECLAIMABLE, "__GFP_RECLAIMABLE"}, \
	{(unsigned long)__GFP_MOVABLE, "__GFP_MOVABLE"}, \
	{(unsigned long)__GFP_ACCOUNT, "__GFP_ACCOUNT"}, \
	{(unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \
	{(unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \
	{(unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"}, \
	{(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"}, \
	{(unsigned long)__GFP_ZEROTAGS, "__GFP_ZEROTAGS"}, \
	{(unsigned long)__GFP_SKIP_KASAN_POISON, "__GFP_SKIP_KASAN_POISON"}
#define show_gfp_flags(flags) \
	(flags) ? __print_flags(flags, "|", \
	__def_gfpflag_names \
	) : "none"
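/*
 * Usage sketch (illustrative, not defined here): tracepoint headers such
 * as include/trace/events/kmem.h print the recorded mask with something
 * like
 *
 *	TP_printk("gfp_flags=%s", show_gfp_flags(__entry->gfp_flags))
 *
 * so the trace output shows symbolic flag names instead of a raw bitmask.
 */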
#if defined(CONFIG_MMU)
#define IF_HAVE_PG_MLOCK(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_MLOCK(flag,string)
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
#define IF_HAVE_PG_UNCACHED(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_UNCACHED(flag,string)
#endif
#ifdef CONFIG_MEMORY_FAILURE
#define IF_HAVE_PG_HWPOISON(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_HWPOISON(flag,string)
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
#define IF_HAVE_PG_IDLE(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_IDLE(flag,string)
#endif
#ifdef CONFIG_64BIT
#define IF_HAVE_PG_ARCH_2(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_ARCH_2(flag,string)
#endif
#ifdef CONFIG_KASAN_HW_TAGS
#define IF_HAVE_PG_SKIP_KASAN_POISON(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_SKIP_KASAN_POISON(flag,string)
#endif
#define __def_pageflag_names \
	{1UL << PG_locked, "locked" }, \
	{1UL << PG_waiters, "waiters" }, \
	{1UL << PG_error, "error" }, \
	{1UL << PG_referenced, "referenced" }, \
	{1UL << PG_uptodate, "uptodate" }, \
	{1UL << PG_dirty, "dirty" }, \
	{1UL << PG_lru, "lru" }, \
	{1UL << PG_active, "active" }, \
	{1UL << PG_workingset, "workingset" }, \
	{1UL << PG_slab, "slab" }, \
	{1UL << PG_owner_priv_1, "owner_priv_1" }, \
	{1UL << PG_arch_1, "arch_1" }, \
	{1UL << PG_reserved, "reserved" }, \
	{1UL << PG_private, "private" }, \
	{1UL << PG_private_2, "private_2" }, \
	{1UL << PG_writeback, "writeback" }, \
	{1UL << PG_head, "head" }, \
	{1UL << PG_mappedtodisk, "mappedtodisk" }, \
	{1UL << PG_reclaim, "reclaim" }, \
	{1UL << PG_swapbacked, "swapbacked" }, \
	{1UL << PG_unevictable, "unevictable" } \
IF_HAVE_PG_MLOCK(PG_mlocked, "mlocked" ) \
IF_HAVE_PG_UNCACHED(PG_uncached, "uncached" ) \
IF_HAVE_PG_HWPOISON(PG_hwpoison, "hwpoison" ) \
IF_HAVE_PG_IDLE(PG_young, "young" ) \
IF_HAVE_PG_IDLE(PG_idle, "idle" ) \
IF_HAVE_PG_ARCH_2(PG_arch_2, "arch_2" ) \
IF_HAVE_PG_SKIP_KASAN_POISON(PG_skip_kasan_poison, "skip_kasan_poison")
#define show_page_flags(flags) \
	(flags) ? __print_flags(flags, "|", \
	__def_pageflag_names \
	) : "none"
#if defined(CONFIG_X86)
#define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" }
#elif defined(CONFIG_PPC)
#define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" }
#elif defined(CONFIG_PARISC) || defined(CONFIG_IA64)
#define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" }
#elif !defined(CONFIG_MMU)
#define __VM_ARCH_SPECIFIC_1 {VM_MAPPED_COPY, "mappedcopy" }
#else
#define __VM_ARCH_SPECIFIC_1 {VM_ARCH_1, "arch_1" }
#endif
#ifdef CONFIG_MEM_SOFT_DIRTY
#define IF_HAVE_VM_SOFTDIRTY(flag,name) {flag, name },
#else
#define IF_HAVE_VM_SOFTDIRTY(flag,name)
#endif
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
# define IF_HAVE_UFFD_MINOR(flag, name) {flag, name},
#else
# define IF_HAVE_UFFD_MINOR(flag, name)
#endif
#define __def_vmaflag_names \
	{VM_READ, "read" }, \
	{VM_WRITE, "write" }, \
	{VM_EXEC, "exec" }, \
	{VM_SHARED, "shared" }, \
	{VM_MAYREAD, "mayread" }, \
	{VM_MAYWRITE, "maywrite" }, \
	{VM_MAYEXEC, "mayexec" }, \
	{VM_MAYSHARE, "mayshare" }, \
	{VM_GROWSDOWN, "growsdown" }, \
	{VM_UFFD_MISSING, "uffd_missing" }, \
IF_HAVE_UFFD_MINOR(VM_UFFD_MINOR, "uffd_minor" ) \
	{VM_PFNMAP, "pfnmap" }, \
	{VM_DENYWRITE, "denywrite" }, \
	{VM_UFFD_WP, "uffd_wp" }, \
	{VM_LOCKED, "locked" }, \
	{VM_IO, "io" }, \
	{VM_SEQ_READ, "seqread" }, \
	{VM_RAND_READ, "randread" }, \
	{VM_DONTCOPY, "dontcopy" }, \
	{VM_DONTEXPAND, "dontexpand" }, \
	{VM_LOCKONFAULT, "lockonfault" }, \
	{VM_ACCOUNT, "account" }, \
	{VM_NORESERVE, "noreserve" }, \
	{VM_HUGETLB, "hugetlb" }, \
	{VM_SYNC, "sync" }, \
	__VM_ARCH_SPECIFIC_1 , \
	{VM_WIPEONFORK, "wipeonfork" }, \
	{VM_DONTDUMP, "dontdump" }, \
IF_HAVE_VM_SOFTDIRTY(VM_SOFTDIRTY, "softdirty" ) \
	{VM_MIXEDMAP, "mixedmap" }, \
	{VM_HUGEPAGE, "hugepage" }, \
	{VM_NOHUGEPAGE, "nohugepage" }, \
	{VM_MERGEABLE, "mergeable" }
#define show_vma_flags(flags) \
	(flags) ? __print_flags(flags, "|", \
	__def_vmaflag_names \
	) : "none"
#ifdef CONFIG_COMPACTION
#define COMPACTION_STATUS \
	EM( COMPACT_SKIPPED, "skipped") \
	EM( COMPACT_DEFERRED, "deferred") \
	EM( COMPACT_CONTINUE, "continue") \
	EM( COMPACT_SUCCESS, "success") \
	EM( COMPACT_PARTIAL_SKIPPED, "partial_skipped") \
	EM( COMPACT_COMPLETE, "complete") \
	EM( COMPACT_NO_SUITABLE_PAGE, "no_suitable_page") \
	EM( COMPACT_NOT_SUITABLE_ZONE, "not_suitable_zone") \
	EMe(COMPACT_CONTENDED, "contended")
/* High-level compaction status feedback */
#define COMPACTION_FAILED	1
#define COMPACTION_WITHDRAWN	2
#define COMPACTION_PROGRESS	3
#define compact_result_to_feedback(result) \
({ \
	enum compact_result __result = result; \
	(compaction_failed(__result)) ? COMPACTION_FAILED : \
		(compaction_withdrawn(__result)) ? COMPACTION_WITHDRAWN : COMPACTION_PROGRESS; \
})
#define COMPACTION_FEEDBACK \
	EM(COMPACTION_FAILED, "failed") \
	EM(COMPACTION_WITHDRAWN, "withdrawn") \
	EMe(COMPACTION_PROGRESS, "progress")
#define COMPACTION_PRIORITY \
	EM(COMPACT_PRIO_SYNC_FULL, "COMPACT_PRIO_SYNC_FULL") \
	EM(COMPACT_PRIO_SYNC_LIGHT, "COMPACT_PRIO_SYNC_LIGHT") \
	EMe(COMPACT_PRIO_ASYNC, "COMPACT_PRIO_ASYNC")
#else
#define COMPACTION_STATUS
#define COMPACTION_PRIORITY
#define COMPACTION_FEEDBACK
#endif
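/*
 * Usage sketch (illustrative, consumer assumed): a tracepoint such as
 * compact_retry in include/trace/events/oom.h can print the coarse
 * feedback roughly as
 *
 *	__print_symbolic(compact_result_to_feedback(__entry->result),
 *			 COMPACTION_FEEDBACK)
 *
 * i.e. the raw enum compact_result is first collapsed into one of the
 * three feedback values above and only then mapped to a string.
 */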
#ifdef CONFIG_ZONE_DMA
#define IFDEF_ZONE_DMA(X) X
#else
#define IFDEF_ZONE_DMA(X)
#endif
#ifdef CONFIG_ZONE_DMA32
#define IFDEF_ZONE_DMA32(X) X
#else
#define IFDEF_ZONE_DMA32(X)
#endif
#ifdef CONFIG_HIGHMEM
#define IFDEF_ZONE_HIGHMEM(X) X
#else
#define IFDEF_ZONE_HIGHMEM(X)
#endif
#define ZONE_TYPE \
	IFDEF_ZONE_DMA(		EM (ZONE_DMA, "DMA")) \
	IFDEF_ZONE_DMA32(	EM (ZONE_DMA32, "DMA32")) \
				EM (ZONE_NORMAL, "Normal") \
	IFDEF_ZONE_HIGHMEM(	EM (ZONE_HIGHMEM, "HighMem")) \
				EMe(ZONE_MOVABLE, "Movable")
#define LRU_NAMES \
		EM (LRU_INACTIVE_ANON, "inactive_anon") \
		EM (LRU_ACTIVE_ANON, "active_anon") \
		EM (LRU_INACTIVE_FILE, "inactive_file") \
		EM (LRU_ACTIVE_FILE, "active_file") \
		EMe(LRU_UNEVICTABLE, "unevictable")
/*
 * First define the enums in the above macros to be exported to userspace
 * via TRACE_DEFINE_ENUM().
 */
#undef EM
#undef EMe
#define EM(a, b)	TRACE_DEFINE_ENUM(a);
#define EMe(a, b)	TRACE_DEFINE_ENUM(a);

COMPACTION_STATUS
COMPACTION_PRIORITY
/* COMPACTION_FEEDBACK are defines, not enums. Not needed here. */
ZONE_TYPE
LRU_NAMES
/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a, b)	{a, b},
#define EMe(a, b)	{a, b}
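/*
 * Usage sketch (illustrative): with EM()/EMe() now expanding to
 * {value, "name"} pairs, the lists above can be passed directly to
 * __print_symbolic() in a TP_printk(), e.g.
 *
 *	__print_symbolic(__entry->lru, LRU_NAMES)
 *
 * as done by the vmscan tracepoints, so trace output shows
 * "inactive_anon", "active_file", etc. instead of a numeric LRU index.
 */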