// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"
#include <trace/events/migrate.h>

/*
 * Define EM() and EMe() so that MIGRATE_REASON from trace/events/migrate.h can
 * be used to populate migrate_reason_names[].
 */
#undef EM
#undef EMe
#define EM(a, b)	b,
#define EMe(a, b)	b

const char *migrate_reason_names[MR_TYPES] = {
	MIGRATE_REASON
};
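/*
 * A minimal illustration of the expansion above, assuming the MIGRATE_REASON
 * list in trace/events/migrate.h still begins with EM(MR_COMPACTION,
 * "compaction"): EM() keeps only the string and appends a comma, EMe()
 * terminates the list, so the array becomes { "compaction", ... } and
 * migrate_reason_names[MR_COMPACTION] is "compaction".
 */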

const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};

#define DEF_PAGETYPE_NAME(_name) [PGTY_##_name - 0xf0] = __stringify(_name)

static const char *page_type_names[] = {
	DEF_PAGETYPE_NAME(slab),
	DEF_PAGETYPE_NAME(hugetlb),
	DEF_PAGETYPE_NAME(offline),
	DEF_PAGETYPE_NAME(guard),
	DEF_PAGETYPE_NAME(table),
	DEF_PAGETYPE_NAME(buddy),
	DEF_PAGETYPE_NAME(unaccepted),
};

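/*
 * The page type lives in the top byte of page->page_type, and the PGTY_*
 * values start at 0xf0; both DEF_PAGETYPE_NAME() above and the shift below
 * subtract that base to index page_type_names[].
 */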
static const char *page_type_name(unsigned int page_type)
{
	unsigned i = (page_type >> 24) - 0xf0;

	if (i >= ARRAY_SIZE(page_type_names))
		return "unknown";
	return page_type_names[i];
}

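/*
 * Dump the state of @page (pfn @pfn) together with the folio it belongs to;
 * @idx is the page's offset within that folio. Normally called on a snapshot
 * taken by __dump_page(), so no locks are held and values may be stale.
 */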
static void __dump_folio(struct folio *folio, struct page *page,
		unsigned long pfn, unsigned long idx)
{
	struct address_space *mapping = folio_mapping(folio);
	int mapcount = atomic_read(&page->_mapcount) + 1;
	char *type = "";

	if (page_mapcount_is_type(mapcount))
		mapcount = 0;

	pr_warn("page: refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
			folio_ref_count(folio), mapcount, mapping,
			folio->index + idx, pfn);
	if (folio_test_large(folio)) {
		int pincount = 0;

		if (folio_has_pincount(folio))
			pincount = atomic_read(&folio->_pincount);

		pr_warn("head: order:%u mapcount:%d entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
				folio_order(folio),
				folio_mapcount(folio),
				folio_entire_mapcount(folio),
				folio_nr_pages_mapped(folio),
				pincount);
	}

#ifdef CONFIG_MEMCG
	if (folio->memcg_data)
		pr_warn("memcg:%lx\n", folio->memcg_data);
#endif
	if (folio_test_ksm(folio))
		type = "ksm ";
	else if (folio_test_anon(folio))
		type = "anon ";
	else if (mapping)
		dump_mapping(mapping);
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	pr_warn("%sflags: %pGp%s\n", type, &folio->flags,
		is_migrate_cma_folio(folio, pfn) ? " CMA" : "");
	if (page_has_type(&folio->page))
		pr_warn("page_type: %x(%s)\n", folio->page.page_type >> 24,
				page_type_name(folio->page.page_type));

	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (folio_test_large(folio))
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), folio,
			2 * sizeof(struct page), false);
}

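/*
 * Take a self-consistent snapshot of the page and its folio before dumping,
 * so the dump does not trip over a folio being split or freed underneath us;
 * if the snapshot could not be taken faithfully, say so in the log.
 */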
static void __dump_page(const struct page *page)
{
	struct page_snapshot ps;

	snapshot_page(&ps, page);
	if (!snapshot_page_is_faithful(&ps))
		pr_warn("page does not match folio\n");

	__dump_folio(&ps.folio_snapshot, &ps.page_snapshot, ps.pfn, ps.idx);
}

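/**
 * dump_page - dump the state of a page to the kernel log
 * @page: the page to dump
 * @reason: optional string explaining why the page is dumped, or NULL
 *
 * A page that is still poisoned (i.e. never initialized) is reported as such
 * rather than interpreted as a folio. A typical (hypothetical) call site:
 *
 *	dump_page(page, "bad page state");
 */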
void dump_page(const struct page *page, const char *reason)
{
	if (PagePoisoned(page))
		pr_warn("page:%p is uninitialized and poisoned\n", page);
	else
		__dump_page(page);
	if (reason)
		pr_warn("page dumped because: %s\n", reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);

#ifdef CONFIG_DEBUG_VM

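/*
 * Dump the fields of a VMA. %px deliberately prints unhashed kernel
 * pointers; these helpers are debug-only (CONFIG_DEBUG_VM).
 */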
void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
#ifdef CONFIG_PER_VMA_LOCK
		"refcnt %x\n"
#endif
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
#ifdef CONFIG_PER_VMA_LOCK
		refcount_read(&vma->vm_refcnt),
#endif
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);

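/*
 * Dump the fields of an mm_struct. The counters are read without locking,
 * so the reported values may be slightly inconsistent with each other.
 */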
void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px task_size %lu\n"
		"mmap_base %lu mmap_legacy_base %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->task_size,
		mm->mmap_base, mm->mmap_legacy_base,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}
EXPORT_SYMBOL(dump_mm);

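/*
 * Dump a VMA merge descriptor along with the mm and the prev/middle/next
 * VMAs it references. Tolerates a NULL @vmg as well as NULL sub-pointers.
 */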
void dump_vmg(const struct vma_merge_struct *vmg, const char *reason)
{
	if (reason)
		pr_warn("vmg %px dumped because: %s\n", vmg, reason);

	if (!vmg) {
		pr_warn("vmg %px state: (NULL)\n", vmg);
		return;
	}

	pr_warn("vmg %px state: mm %px pgoff %lx\n"
		"vmi %px [%lx,%lx)\n"
		"prev %px middle %px next %px target %px\n"
		"start %lx end %lx flags %lx\n"
		"file %px anon_vma %px policy %px\n"
		"uffd_ctx %px\n"
		"anon_name %px\n"
		"state %x\n"
		"just_expand %d\n"
		"__adjust_middle_start %d __adjust_next_start %d\n"
		"__remove_middle %d __remove_next %d\n",
		vmg, vmg->mm, vmg->pgoff,
		vmg->vmi, vmg->vmi ? vma_iter_addr(vmg->vmi) : 0,
		vmg->vmi ? vma_iter_end(vmg->vmi) : 0,
		vmg->prev, vmg->middle, vmg->next, vmg->target,
		vmg->start, vmg->end, vmg->vm_flags,
		vmg->file, vmg->anon_vma, vmg->policy,
#ifdef CONFIG_USERFAULTFD
		vmg->uffd_ctx.ctx,
#else
		(void *)0,
#endif
		vmg->anon_name,
		(int)vmg->state,
		vmg->just_expand,
		vmg->__adjust_middle_start, vmg->__adjust_next_start,
		vmg->__remove_middle, vmg->__remove_next);

	if (vmg->mm) {
		pr_warn("vmg %px mm:\n", vmg);
		dump_mm(vmg->mm);
	} else {
		pr_warn("vmg %px mm: (NULL)\n", vmg);
	}

	if (vmg->prev) {
		pr_warn("vmg %px prev:\n", vmg);
		dump_vma(vmg->prev);
	} else {
		pr_warn("vmg %px prev: (NULL)\n", vmg);
	}

	if (vmg->middle) {
		pr_warn("vmg %px middle:\n", vmg);
		dump_vma(vmg->middle);
	} else {
		pr_warn("vmg %px middle: (NULL)\n", vmg);
	}

	if (vmg->next) {
		pr_warn("vmg %px next:\n", vmg);
		dump_vma(vmg->next);
	} else {
		pr_warn("vmg %px next: (NULL)\n", vmg);
	}

#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
	if (vmg->vmi) {
		pr_warn("vmg %px vmi:\n", vmg);
		vma_iter_dump_tree(vmg->vmi);
	} else {
		pr_warn("vmg %px vmi: (NULL)\n", vmg);
	}
#endif
}
EXPORT_SYMBOL(dump_vmg);

static bool page_init_poisoning __read_mostly = true;

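/*
 * Parse the "vm_debug" kernel command line option. Summarizing the logic
 * below:
 *	vm_debug	enable all options (no argument)
 *	vm_debug=-	disable all options
 *	vm_debug=P	enable page struct init poisoning ('p', any case)
 * Unknown option characters are reported and skipped.
 */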
static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);

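/*
 * Fill a freshly allocated range of struct pages with PAGE_POISON_PATTERN so
 * that reads of uninitialized struct page fields stand out; a no-op when
 * poisoning was disabled via vm_debug.
 */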
void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}

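/*
 * Dump the iterator state and the maple tree backing @vmi; compiles to a
 * no-op unless CONFIG_DEBUG_VM_MAPLE_TREE is enabled.
 */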
void vma_iter_dump_tree(const struct vma_iterator *vmi)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	mas_dump(&vmi->mas);
	mt_dump(vmi->mas.tree, mt_dump_hex);
#endif	/* CONFIG_DEBUG_VM_MAPLE_TREE */
}

#endif /* CONFIG_DEBUG_VM */