// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"

char *migrate_reason_names[MR_TYPES] = {
	"compaction",
	"memory_failure",
	"memory_hotplug",
	"syscall_or_cpuset",
	"mempolicy_mbind",
	"numa_misplaced",
	"cma",
};
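
/*
 * Note: these names are indexed by enum migrate_reason (declared in
 * <linux/migrate.h>), so the order here must match the enum. Consumers
 * such as page_owner print migrate_reason_names[reason] directly.
 */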

const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};
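
/*
 * These tables back the %pGp, %pGg and %pGv printk extensions in
 * lib/vsprintf.c, which decode a flags word into human-readable names,
 * e.g.:
 *
 *	pr_alert("flags: %#lx(%pGp)\n", page->flags, &page->flags);
 *
 * might print something like "flags: 0x68(uptodate|lru|active)".
 */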

void __dump_page(struct page *page, const char *reason)
{
	bool page_poisoned = PagePoisoned(page);
	int mapcount;

	/*
	 * If the struct page is poisoned, don't call the Page*() helpers,
	 * as that leads to a recursive loop: the Page*() checks detect
	 * poisoned pages and call dump_page() themselves.
	 */
	if (page_poisoned) {
		pr_emerg("page:%px is uninitialized and poisoned", page);
		goto hex_only;
	}

	/*
	 * Avoid VM_BUG_ON() in page_mapcount():
	 * the page->_mapcount space in struct page is used by sl[aou]b pages
	 * to encode their own info.
	 */
	mapcount = PageSlab(page) ? 0 : page_mapcount(page);

	pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx",
		  page, page_ref_count(page), mapcount,
		  page->mapping, page_to_pgoff(page));
	if (PageCompound(page))
		pr_cont(" compound_mapcount: %d", compound_mapcount(page));
	pr_cont("\n");
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	pr_emerg("flags: %#lx(%pGp)\n", page->flags, &page->flags);

hex_only:
	print_hex_dump(KERN_ALERT, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);

	if (reason)
		pr_alert("page dumped because: %s\n", reason);

#ifdef CONFIG_MEMCG
	if (!page_poisoned && page->mem_cgroup)
		pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup);
#endif
}

void dump_page(struct page *page, const char *reason)
{
	__dump_page(page, reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);
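
/*
 * Example (illustrative only): most callers reach dump_page() through
 * VM_BUG_ON_PAGE() in <linux/mmdebug.h>, which dumps the page before
 * triggering BUG(). A direct caller might look like this hypothetical
 * helper:
 *
 *	static void my_check_page(struct page *page)
 *	{
 *		if (unlikely(page_ref_count(page) < 0))
 *			dump_page(page, "negative refcount");
 *	}
 */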

#ifdef CONFIG_DEBUG_VM

void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px\n"
		"next %px prev %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
		vma->vm_prev, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);
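
/*
 * Example (illustrative only): dump_vma() is typically reached through
 * VM_BUG_ON_VMA() in <linux/mmdebug.h>, e.g.:
 *
 *	VM_BUG_ON_VMA(vma->vm_start >= vma->vm_end, vma);
 *
 * which dumps the VMA before triggering the VM_BUG_ON().
 */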

void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
#ifdef CONFIG_MMU
		"get_unmapped_area %px\n"
#endif
		"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx core_state %px\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"mmu_notifier_mm %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
		mm->get_unmapped_area,
#endif
		mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		mm->pinned_vm, mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags, mm->core_state,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->mmu_notifier_mm,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}
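
/*
 * Note: the #ifdef'd pieces of the format string above must stay in the
 * same order as the matching #ifdef'd arguments below it, otherwise the
 * remaining arguments shift against their specifiers. Like dump_vma(),
 * dump_mm() is normally reached through VM_BUG_ON_MM() in
 * <linux/mmdebug.h>.
 */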

static bool page_init_poisoning __read_mostly = true;

static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);
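
/*
 * Summary of the command line forms parsed above ('p' is currently the
 * only recognized option character):
 *
 *	vm_debug	enable all debugging options
 *	vm_debug=-	disable all debugging options
 *	vm_debug=P	enable struct page init poisoning only
 */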

void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}
EXPORT_SYMBOL_GPL(page_init_poison);
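
/*
 * Example (illustrative only): memory hotplug poisons the struct pages of
 * a newly added section before they are initialized, so that premature use
 * trips the PagePoisoned() checks, roughly:
 *
 *	page_init_poison(memmap, sizeof(struct page) * PAGES_PER_SECTION);
 */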
#endif		/* CONFIG_DEBUG_VM */