// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"
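
/*
 * Human-readable names for the migrate reasons reported via page_owner;
 * the entries must stay in sync with enum migrate_reason.
 */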
const char *migrate_reason_names[MR_TYPES] = {
	"compaction",
	"memory_failure",
	"memory_hotplug",
	"syscall_or_cpuset",
	"mempolicy_mbind",
	"numa_misplaced",
	"cma",
};

const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};

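/*
 * Dump the state of one struct page. Best effort only: the page may be
 * corrupt or poisoned, so every dereference that depends on page contents
 * (the mapping, the host inode, the dentry) goes through
 * get_kernel_nofault(), and the raw struct page is always hex-dumped at
 * the end.
 */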
void __dump_page(struct page *page, const char *reason)
{
	struct page *head = compound_head(page);
	struct address_space *mapping;
	bool page_poisoned = PagePoisoned(page);
	bool compound = PageCompound(page);
	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	bool page_cma = is_migrate_cma_page(page);
	int mapcount;
	char *type = "";

	/*
	 * If the struct page is poisoned, don't access the Page*() functions
	 * as that leads to a recursive loop: the Page*() checks detect
	 * poisoned pages and call dump_page() themselves.
	 */
	if (page_poisoned) {
		pr_warn("page:%px is uninitialized and poisoned", page);
		goto hex_only;
	}

	if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
		/*
		 * Corrupt page, so we cannot call page_mapping. Instead, do a
		 * safe subset of the steps that page_mapping() does. Caution:
		 * this will be misleading for tail pages, PageSwapCache pages,
		 * and potentially other situations. (See the page_mapping()
		 * implementation for what's missing here.)
		 */
		unsigned long tmp = (unsigned long)page->mapping;

		if (tmp & PAGE_MAPPING_ANON)
			mapping = NULL;
		else
			mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS);
		head = page;
		compound = false;
	} else {
		mapping = page_mapping(page);
	}

	/*
	 * Avoid VM_BUG_ON() in page_mapcount().
	 * page->_mapcount space in struct page is used by sl[aou]b pages to
	 * encode their own info.
	 */
	mapcount = PageSlab(head) ? 0 : page_mapcount(page);

	pr_warn("page:%p refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
			page, page_ref_count(head), mapcount, mapping,
			page_to_pgoff(page), page_to_pfn(page));
	if (compound) {
		if (hpage_pincount_available(page)) {
			pr_warn("head:%p order:%u compound_mapcount:%d compound_pincount:%d\n",
					head, compound_order(head),
					head_compound_mapcount(head),
					head_compound_pincount(head));
		} else {
			pr_warn("head:%p order:%u compound_mapcount:%d\n",
					head, compound_order(head),
					head_compound_mapcount(head));
		}
	}
	if (PageKsm(page))
		type = "ksm ";
	else if (PageAnon(page))
		type = "anon ";
	else if (mapping) {
		struct inode *host;
		const struct address_space_operations *a_ops;
		struct hlist_node *dentry_first;
		struct dentry *dentry_ptr;
		struct dentry dentry;
		unsigned long ino;

		/*
		 * mapping can be an invalid pointer and we don't want to crash
		 * accessing it, so probe everything depending on it carefully.
		 */
		if (get_kernel_nofault(host, &mapping->host) ||
		    get_kernel_nofault(a_ops, &mapping->a_ops)) {
			pr_warn("failed to read mapping contents, not a valid kernel address?\n");
			goto out_mapping;
		}

		if (!host) {
			pr_warn("aops:%ps\n", a_ops);
			goto out_mapping;
		}

		if (get_kernel_nofault(dentry_first, &host->i_dentry.first) ||
		    get_kernel_nofault(ino, &host->i_ino)) {
			pr_warn("aops:%ps with invalid host inode %px\n",
					a_ops, host);
			goto out_mapping;
		}

		if (!dentry_first) {
			pr_warn("aops:%ps ino:%lx\n", a_ops, ino);
			goto out_mapping;
		}

		dentry_ptr = container_of(dentry_first, struct dentry, d_u.d_alias);
		if (get_kernel_nofault(dentry, dentry_ptr)) {
			pr_warn("aops:%ps ino:%lx with invalid dentry %px\n",
					a_ops, ino, dentry_ptr);
		} else {
			/*
			 * if dentry is corrupted, the %pd handler may still
			 * crash, but it's unlikely that we reach here with a
			 * corrupted struct page
			 */
			pr_warn("aops:%ps ino:%lx dentry name:\"%pd\"\n",
					a_ops, ino, &dentry);
		}
	}
out_mapping:
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	pr_warn("%sflags: %#lx(%pGp)%s\n", type, head->flags, &head->flags,
		page_cma ? " CMA" : "");

hex_only:
	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (head != page)
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
				sizeof(unsigned long), head,
				sizeof(struct page), false);

	if (reason)
		pr_warn("page dumped because: %s\n", reason);

#ifdef CONFIG_MEMCG
	if (!page_poisoned && page->memcg_data)
		pr_warn("page's memcg:%lx\n", page->memcg_data);
#endif
}

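/*
 * Entry point used by debug checks such as VM_BUG_ON_PAGE(): dump the page
 * state and, when CONFIG_PAGE_OWNER is enabled, the page_owner allocation
 * stack as well (dump_page_owner() is a no-op otherwise).
 */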
void dump_page(struct page *page, const char *reason)
{
	__dump_page(page, reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);

#ifdef CONFIG_DEBUG_VM

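/* Dump the fields of a vm_area_struct that matter for mmap/rmap debugging. */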
void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px\n"
		"next %px prev %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
		vma->vm_prev, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);

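/*
 * Dump an mm_struct. The format string and the argument list below must be
 * kept in sync: several fields only exist under their config options, so
 * both halves use matching #ifdef blocks.
 */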
void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
#ifdef CONFIG_MMU
		"get_unmapped_area %px\n"
#endif
		"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx core_state %px\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
		mm->get_unmapped_area,
#endif
		mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags, mm->core_state,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags);
}

static bool page_init_poisoning __read_mostly = true;

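/*
 * Parse the "vm_debug" kernel command line option:
 *
 *	vm_debug	enable every debug option this code controls
 *	vm_debug=-	disable all of them
 *	vm_debug=p	enable struct page init poisoning
 *
 * Unknown option characters are reported and skipped.
 */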
static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}
		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);

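/*
 * Fill a range of struct pages with PAGE_POISON_PATTERN so that any use of
 * an uninitialized struct page trips the PagePoisoned() checks.
 */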
void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}
EXPORT_SYMBOL_GPL(page_init_poison);
#endif /* CONFIG_DEBUG_VM */