mm/page_owner: record and dump free_pid and free_tgid
mm/page_owner.c (linux-2.6-microblaze.git)
// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/memblock.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include <linux/stackdepot.h>
#include <linux/seq_file.h>
#include <linux/memcontrol.h>
#include <linux/sched/clock.h>

#include "internal.h"

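/*
 * How the pieces below fit together: page_owner keeps per-page allocation
 * and free metadata in the page_ext area (struct page_owner).  It is
 * enabled by booting with "page_owner=on" (see early_page_owner_param())
 * and, once page_owner_inited is set, a record is captured on every page
 * allocation and free.  The accumulated records can be read back through
 * the debugfs file registered in pageowner_init(), which usually appears
 * as /sys/kernel/debug/page_owner when debugfs is mounted there.
 */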
/*
 * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
 * to use off-stack temporary storage
 */
#define PAGE_OWNER_STACK_DEPTH (16)

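/*
 * Per-page record kept in page_ext.  The allocation-side fields (handle,
 * ts_nsec, pid/tgid/comm) are filled in by __set_page_owner_handle(); the
 * free-side fields (free_handle, free_ts_nsec, free_pid, free_tgid) are
 * filled in by __reset_page_owner() when the page is freed.
 */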
struct page_owner {
        unsigned short order;
        short last_migrate_reason;
        gfp_t gfp_mask;
        depot_stack_handle_t handle;
        depot_stack_handle_t free_handle;
        u64 ts_nsec;
        u64 free_ts_nsec;
        char comm[TASK_COMM_LEN];
        pid_t pid;
        pid_t tgid;
        pid_t free_pid;
        pid_t free_tgid;
};

static bool page_owner_enabled __initdata;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);

static depot_stack_handle_t dummy_handle;
static depot_stack_handle_t failure_handle;
static depot_stack_handle_t early_handle;

static void init_early_allocated_pages(void);

static int __init early_page_owner_param(char *buf)
{
        int ret = kstrtobool(buf, &page_owner_enabled);

        if (page_owner_enabled)
                stack_depot_request_early_init();

        return ret;
}
early_param("page_owner", early_page_owner_param);

static __init bool need_page_owner(void)
{
        return page_owner_enabled;
}

static __always_inline depot_stack_handle_t create_dummy_stack(void)
{
        unsigned long entries[4];
        unsigned int nr_entries;

        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
        return stack_depot_save(entries, nr_entries, GFP_KERNEL);
}

static noinline void register_dummy_stack(void)
{
        dummy_handle = create_dummy_stack();
}

static noinline void register_failure_stack(void)
{
        failure_handle = create_dummy_stack();
}

static noinline void register_early_stack(void)
{
        early_handle = create_dummy_stack();
}

static __init void init_page_owner(void)
{
        if (!page_owner_enabled)
                return;

        register_dummy_stack();
        register_failure_stack();
        register_early_stack();
        static_branch_enable(&page_owner_inited);
        init_early_allocated_pages();
}

struct page_ext_operations page_owner_ops = {
        .size = sizeof(struct page_owner),
        .need = need_page_owner,
        .init = init_page_owner,
        .need_shared_flags = true,
};

static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
{
        return page_ext_data(page_ext, &page_owner_ops);
}

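/*
 * Save the current call chain into the stack depot and return a handle to
 * it.  dummy_handle is returned when we recurse into the allocator from
 * here, failure_handle when the depot cannot store the trace.
 */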
static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
        unsigned long entries[PAGE_OWNER_STACK_DEPTH];
        depot_stack_handle_t handle;
        unsigned int nr_entries;

        /*
         * Avoid recursion.
         *
         * Sometimes page metadata allocation tracking requires more
         * memory to be allocated:
         * - when a new stack trace is saved to the stack depot
         * - when the backtrace itself is calculated (ia64)
         */
        if (current->in_page_owner)
                return dummy_handle;
        current->in_page_owner = 1;

        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
        handle = stack_depot_save(entries, nr_entries, flags);
        if (!handle)
                handle = failure_handle;

        current->in_page_owner = 0;
        return handle;
}

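/*
 * Free-side hook: record the freeing stack, timestamp and the pid/tgid of
 * the task doing the free (the new free_pid/free_tgid fields) in the
 * page_ext of every page of the freed block, and clear the "allocated" bit.
 */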
void __reset_page_owner(struct page *page, unsigned short order)
{
        int i;
        struct page_ext *page_ext;
        depot_stack_handle_t handle;
        struct page_owner *page_owner;
        u64 free_ts_nsec = local_clock();

        page_ext = page_ext_get(page);
        if (unlikely(!page_ext))
                return;

        handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
        for (i = 0; i < (1 << order); i++) {
                __clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
                page_owner = get_page_owner(page_ext);
                page_owner->free_handle = handle;
                page_owner->free_ts_nsec = free_ts_nsec;
                page_owner->free_pid = current->pid;
                page_owner->free_tgid = current->tgid;
                page_ext = page_ext_next(page_ext);
        }
        page_ext_put(page_ext);
}

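/*
 * Allocation-side recording: stamp every page_ext of the block with the
 * allocation stack handle, order, gfp mask, timestamp and the allocating
 * task's pid/tgid/comm, then mark the pages as owner-tracked and allocated.
 */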
static inline void __set_page_owner_handle(struct page_ext *page_ext,
                                        depot_stack_handle_t handle,
                                        unsigned short order, gfp_t gfp_mask)
{
        struct page_owner *page_owner;
        int i;
        u64 ts_nsec = local_clock();

        for (i = 0; i < (1 << order); i++) {
                page_owner = get_page_owner(page_ext);
                page_owner->handle = handle;
                page_owner->order = order;
                page_owner->gfp_mask = gfp_mask;
                page_owner->last_migrate_reason = -1;
                page_owner->pid = current->pid;
                page_owner->tgid = current->tgid;
                page_owner->ts_nsec = ts_nsec;
                strscpy(page_owner->comm, current->comm,
                        sizeof(page_owner->comm));
                __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
                __set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);

                page_ext = page_ext_next(page_ext);
        }
}

noinline void __set_page_owner(struct page *page, unsigned short order,
                                        gfp_t gfp_mask)
{
        struct page_ext *page_ext;
        depot_stack_handle_t handle;

        handle = save_stack(gfp_mask);

        page_ext = page_ext_get(page);
        if (unlikely(!page_ext))
                return;
        __set_page_owner_handle(page_ext, handle, order, gfp_mask);
        page_ext_put(page_ext);
}

void __set_page_owner_migrate_reason(struct page *page, int reason)
{
        struct page_ext *page_ext = page_ext_get(page);
        struct page_owner *page_owner;

        if (unlikely(!page_ext))
                return;

        page_owner = get_page_owner(page_ext);
        page_owner->last_migrate_reason = reason;
        page_ext_put(page_ext);
}

void __split_page_owner(struct page *page, unsigned int nr)
{
        int i;
        struct page_ext *page_ext = page_ext_get(page);
        struct page_owner *page_owner;

        if (unlikely(!page_ext))
                return;

        for (i = 0; i < nr; i++) {
                page_owner = get_page_owner(page_ext);
                page_owner->order = 0;
                page_ext = page_ext_next(page_ext);
        }
        page_ext_put(page_ext);
}

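/*
 * On folio migration, copy the owner record (including the free_pid and
 * free_tgid fields) from the old folio to the new one so the ownership
 * history follows the data.
 */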
void __folio_copy_owner(struct folio *newfolio, struct folio *old)
{
        struct page_ext *old_ext;
        struct page_ext *new_ext;
        struct page_owner *old_page_owner, *new_page_owner;

        old_ext = page_ext_get(&old->page);
        if (unlikely(!old_ext))
                return;

        new_ext = page_ext_get(&newfolio->page);
        if (unlikely(!new_ext)) {
                page_ext_put(old_ext);
                return;
        }

        old_page_owner = get_page_owner(old_ext);
        new_page_owner = get_page_owner(new_ext);
        new_page_owner->order = old_page_owner->order;
        new_page_owner->gfp_mask = old_page_owner->gfp_mask;
        new_page_owner->last_migrate_reason =
                old_page_owner->last_migrate_reason;
        new_page_owner->handle = old_page_owner->handle;
        new_page_owner->pid = old_page_owner->pid;
        new_page_owner->tgid = old_page_owner->tgid;
        new_page_owner->free_pid = old_page_owner->free_pid;
        new_page_owner->free_tgid = old_page_owner->free_tgid;
        new_page_owner->ts_nsec = old_page_owner->ts_nsec;
        new_page_owner->free_ts_nsec = old_page_owner->ts_nsec;
        strcpy(new_page_owner->comm, old_page_owner->comm);

        /*
         * We don't clear the bit on the old folio as it's going to be freed
         * after migration. Until then, the info can be useful in case of
         * a bug, and the overall stats will be off a bit only temporarily.
         * Also, migrate_misplaced_transhuge_page() can still fail the
         * migration and then we want the old folio to retain the info. But
         * in that case we also don't need to explicitly clear the info from
         * the new page, which will be freed.
         */
        __set_bit(PAGE_EXT_OWNER, &new_ext->flags);
        __set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags);
        page_ext_put(new_ext);
        page_ext_put(old_ext);
}

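/*
 * Walk a zone pageblock by pageblock and count blocks that contain pages
 * whose recorded allocation migratetype differs from the pageblock's
 * migratetype; used when producing the /proc/pagetypeinfo output.
 */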
void pagetypeinfo_showmixedcount_print(struct seq_file *m,
                                       pg_data_t *pgdat, struct zone *zone)
{
        struct page *page;
        struct page_ext *page_ext;
        struct page_owner *page_owner;
        unsigned long pfn, block_end_pfn;
        unsigned long end_pfn = zone_end_pfn(zone);
        unsigned long count[MIGRATE_TYPES] = { 0, };
        int pageblock_mt, page_mt;
        int i;

        /* Scan block by block. First and last block may be incomplete */
        pfn = zone->zone_start_pfn;

        /*
         * Walk the zone in pageblock_nr_pages steps. If a page block spans
         * a zone boundary, it will be double counted between zones. This does
         * not matter as the mixed block count will still be correct
         */
        for (; pfn < end_pfn; ) {
                page = pfn_to_online_page(pfn);
                if (!page) {
                        pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
                        continue;
                }

                block_end_pfn = pageblock_end_pfn(pfn);
                block_end_pfn = min(block_end_pfn, end_pfn);

                pageblock_mt = get_pageblock_migratetype(page);

                for (; pfn < block_end_pfn; pfn++) {
                        /* The pageblock is online, no need to recheck. */
                        page = pfn_to_page(pfn);

                        if (page_zone(page) != zone)
                                continue;

                        if (PageBuddy(page)) {
                                unsigned long freepage_order;

                                freepage_order = buddy_order_unsafe(page);
                                if (freepage_order <= MAX_ORDER)
                                        pfn += (1UL << freepage_order) - 1;
                                continue;
                        }

                        if (PageReserved(page))
                                continue;

                        page_ext = page_ext_get(page);
                        if (unlikely(!page_ext))
                                continue;

                        if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
                                goto ext_put_continue;

                        page_owner = get_page_owner(page_ext);
                        page_mt = gfp_migratetype(page_owner->gfp_mask);
                        if (pageblock_mt != page_mt) {
                                if (is_migrate_cma(pageblock_mt))
                                        count[MIGRATE_MOVABLE]++;
                                else
                                        count[pageblock_mt]++;

                                pfn = block_end_pfn;
                                page_ext_put(page_ext);
                                break;
                        }
                        pfn += (1UL << page_owner->order) - 1;
ext_put_continue:
                        page_ext_put(page_ext);
                }
        }

        /* Print counts */
        seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
        for (i = 0; i < MIGRATE_TYPES; i++)
                seq_printf(m, "%12lu ", count[i]);
        seq_putc(m, '\n');
}

/*
 * Look up the memcg information and print it out
 */
static inline int print_page_owner_memcg(char *kbuf, size_t count, int ret,
                                         struct page *page)
{
#ifdef CONFIG_MEMCG
        unsigned long memcg_data;
        struct mem_cgroup *memcg;
        bool online;
        char name[80];

        rcu_read_lock();
        memcg_data = READ_ONCE(page->memcg_data);
        if (!memcg_data)
                goto out_unlock;

        if (memcg_data & MEMCG_DATA_OBJCGS)
                ret += scnprintf(kbuf + ret, count - ret,
                                "Slab cache page\n");

        memcg = page_memcg_check(page);
        if (!memcg)
                goto out_unlock;

        online = (memcg->css.flags & CSS_ONLINE);
        cgroup_name(memcg->css.cgroup, name, sizeof(name));
        ret += scnprintf(kbuf + ret, count - ret,
                        "Charged %sto %smemcg %s\n",
                        PageMemcgKmem(page) ? "(via objcg) " : "",
                        online ? "" : "offline ",
                        name);
out_unlock:
        rcu_read_unlock();
#endif /* CONFIG_MEMCG */

        return ret;
}

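/*
 * Format one owner record into a kernel buffer and copy it to userspace.
 * The layout mirrors what __dump_page_owner() prints: the allocation line
 * (order, gfp mask, pid/tgid/comm, timestamp), the mobility grouping line,
 * the saved allocation stack, an optional migrate reason and memcg info.
 */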
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
                struct page *page, struct page_owner *page_owner,
                depot_stack_handle_t handle)
{
        int ret, pageblock_mt, page_mt;
        char *kbuf;

        count = min_t(size_t, count, PAGE_SIZE);
        kbuf = kmalloc(count, GFP_KERNEL);
        if (!kbuf)
                return -ENOMEM;

        ret = scnprintf(kbuf, count,
                        "Page allocated via order %u, mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu ns\n",
                        page_owner->order, page_owner->gfp_mask,
                        &page_owner->gfp_mask, page_owner->pid,
                        page_owner->tgid, page_owner->comm,
                        page_owner->ts_nsec);

        /* Print information relevant to grouping pages by mobility */
        pageblock_mt = get_pageblock_migratetype(page);
        page_mt  = gfp_migratetype(page_owner->gfp_mask);
        ret += scnprintf(kbuf + ret, count - ret,
                        "PFN 0x%lx type %s Block %lu type %s Flags %pGp\n",
                        pfn,
                        migratetype_names[page_mt],
                        pfn >> pageblock_order,
                        migratetype_names[pageblock_mt],
                        &page->flags);

        ret += stack_depot_snprint(handle, kbuf + ret, count - ret, 0);
        if (ret >= count)
                goto err;

        if (page_owner->last_migrate_reason != -1) {
                ret += scnprintf(kbuf + ret, count - ret,
                        "Page has been migrated, last migrate reason: %s\n",
                        migrate_reason_names[page_owner->last_migrate_reason]);
        }

        ret = print_page_owner_memcg(kbuf, count, ret, page);

        ret += snprintf(kbuf + ret, count - ret, "\n");
        if (ret >= count)
                goto err;

        if (copy_to_user(buf, kbuf, ret))
                ret = -EFAULT;

        kfree(kbuf);
        return ret;

err:
        kfree(kbuf);
        return -ENOMEM;
}

void __dump_page_owner(const struct page *page)
{
        struct page_ext *page_ext = page_ext_get((void *)page);
        struct page_owner *page_owner;
        depot_stack_handle_t handle;
        gfp_t gfp_mask;
        int mt;

        if (unlikely(!page_ext)) {
                pr_alert("There is no page extension available.\n");
                return;
        }

        page_owner = get_page_owner(page_ext);
        gfp_mask = page_owner->gfp_mask;
        mt = gfp_migratetype(gfp_mask);

        if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
                pr_alert("page_owner info is not present (never set?)\n");
                page_ext_put(page_ext);
                return;
        }

        if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
                pr_alert("page_owner tracks the page as allocated\n");
        else
                pr_alert("page_owner tracks the page as freed\n");

        pr_alert("page last allocated via order %u, migratetype %s, gfp_mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu, free_ts %llu\n",
                 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask,
                 page_owner->pid, page_owner->tgid, page_owner->comm,
                 page_owner->ts_nsec, page_owner->free_ts_nsec);

        handle = READ_ONCE(page_owner->handle);
        if (!handle)
                pr_alert("page_owner allocation stack trace missing\n");
        else
                stack_depot_print(handle);

        handle = READ_ONCE(page_owner->free_handle);
        if (!handle) {
                pr_alert("page_owner free stack trace missing\n");
        } else {
                pr_alert("page last free pid %d tgid %d stack trace:\n",
                          page_owner->free_pid, page_owner->free_tgid);
                stack_depot_print(handle);
        }

        if (page_owner->last_migrate_reason != -1)
                pr_alert("page has been migrated, last migrate reason: %s\n",
                        migrate_reason_names[page_owner->last_migrate_reason]);
        page_ext_put(page_ext);
}

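/*
 * debugfs read handler.  The file offset (*ppos) is used as a PFN cursor:
 * each read scans forward from that PFN for the next allocated page with a
 * valid owner record, prints it, and stores the following PFN back into
 * *ppos so a sequence of reads walks all of memory exactly once.
 */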
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        unsigned long pfn;
        struct page *page;
        struct page_ext *page_ext;
        struct page_owner *page_owner;
        depot_stack_handle_t handle;

        if (!static_branch_unlikely(&page_owner_inited))
                return -EINVAL;

        page = NULL;
        if (*ppos == 0)
                pfn = min_low_pfn;
        else
                pfn = *ppos;
        /* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
        while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
                pfn++;

        /* Find an allocated page */
        for (; pfn < max_pfn; pfn++) {
                /*
                 * This temporary page_owner is required so that we can
                 * avoid context switches while holding the RCU lock and
                 * copying the page owner information to userspace via
                 * copy_to_user() or GFP_KERNEL allocations.
                 */
                struct page_owner page_owner_tmp;

                /*
                 * If the new page is in a new MAX_ORDER_NR_PAGES area,
                 * validate the area as existing, skip it if not
                 */
                if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
                        pfn += MAX_ORDER_NR_PAGES - 1;
                        continue;
                }

                page = pfn_to_page(pfn);
                if (PageBuddy(page)) {
                        unsigned long freepage_order = buddy_order_unsafe(page);

                        if (freepage_order <= MAX_ORDER)
                                pfn += (1UL << freepage_order) - 1;
                        continue;
                }

                page_ext = page_ext_get(page);
                if (unlikely(!page_ext))
                        continue;

                /*
                 * Some pages could be missed by concurrent allocation or free,
                 * because we don't hold the zone lock.
                 */
                if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                        goto ext_put_continue;

                /*
                 * Although we do have the info about past allocation of free
                 * pages, it's not relevant for current memory usage.
                 */
                if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
                        goto ext_put_continue;

                page_owner = get_page_owner(page_ext);

                /*
                 * Don't print "tail" pages of high-order allocations as that
                 * would inflate the stats.
                 */
                if (!IS_ALIGNED(pfn, 1 << page_owner->order))
                        goto ext_put_continue;

                /*
                 * Access to page_owner->handle isn't synchronized, so be
                 * careful when accessing it.
                 */
                handle = READ_ONCE(page_owner->handle);
                if (!handle)
                        goto ext_put_continue;

                /* Record the next PFN to read in the file offset */
                *ppos = pfn + 1;

                page_owner_tmp = *page_owner;
                page_ext_put(page_ext);
                return print_page_owner(buf, count, pfn, page,
                                &page_owner_tmp, handle);
ext_put_continue:
                page_ext_put(page_ext);
        }

        return 0;
}

static loff_t lseek_page_owner(struct file *file, loff_t offset, int orig)
{
        switch (orig) {
        case SEEK_SET:
                file->f_pos = offset;
                break;
        case SEEK_CUR:
                file->f_pos += offset;
                break;
        default:
                return -EINVAL;
        }
        return file->f_pos;
}

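/*
 * Boot-time pass over a zone: pages that were allocated before page_owner
 * was initialized get a synthetic owner record pointing at early_handle so
 * they do not show up as untracked.
 */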
static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
        unsigned long pfn = zone->zone_start_pfn;
        unsigned long end_pfn = zone_end_pfn(zone);
        unsigned long count = 0;

        /*
         * Walk the zone in pageblock_nr_pages steps. If a page block spans
         * a zone boundary, it will be double counted between zones. This does
         * not matter as the mixed block count will still be correct
         */
        for (; pfn < end_pfn; ) {
                unsigned long block_end_pfn;

                if (!pfn_valid(pfn)) {
                        pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
                        continue;
                }

                block_end_pfn = pageblock_end_pfn(pfn);
                block_end_pfn = min(block_end_pfn, end_pfn);

                for (; pfn < block_end_pfn; pfn++) {
                        struct page *page = pfn_to_page(pfn);
                        struct page_ext *page_ext;

                        if (page_zone(page) != zone)
                                continue;

                        /*
                         * To avoid having to grab zone->lock, be a little
                         * careful when reading buddy page order. The only
                         * danger is that we skip too much and potentially miss
                         * some early allocated pages, which is better than
                         * heavy lock contention.
                         */
                        if (PageBuddy(page)) {
                                unsigned long order = buddy_order_unsafe(page);

                                if (order > 0 && order <= MAX_ORDER)
                                        pfn += (1UL << order) - 1;
                                continue;
                        }

                        if (PageReserved(page))
                                continue;

                        page_ext = page_ext_get(page);
                        if (unlikely(!page_ext))
                                continue;

                        /* Maybe overlapping zone */
                        if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                                goto ext_put_continue;

                        /* Found early allocated page */
                        __set_page_owner_handle(page_ext, early_handle,
                                                0, 0);
                        count++;
ext_put_continue:
                        page_ext_put(page_ext);
                }
                cond_resched();
        }

        pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
                pgdat->node_id, zone->name, count);
}

static void init_zones_in_node(pg_data_t *pgdat)
{
        struct zone *zone;
        struct zone *node_zones = pgdat->node_zones;

        for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
                if (!populated_zone(zone))
                        continue;

                init_pages_in_zone(pgdat, zone);
        }
}

static void init_early_allocated_pages(void)
{
        pg_data_t *pgdat;

        for_each_online_pgdat(pgdat)
                init_zones_in_node(pgdat);
}

static const struct file_operations proc_page_owner_operations = {
        .read           = read_page_owner,
        .llseek         = lseek_page_owner,
};

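/*
 * Typical usage sketch, assuming CONFIG_PAGE_OWNER=y, a "page_owner=on"
 * boot parameter and debugfs mounted at /sys/kernel/debug (the exact
 * paths depend on the setup):
 *
 *   cat /sys/kernel/debug/page_owner > page_owner_full.txt
 *
 * The resulting dump can then be aggregated with the page_owner_sort
 * helper shipped in the kernel tree.
 */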
static int __init pageowner_init(void)
{
        if (!static_branch_unlikely(&page_owner_inited)) {
                pr_info("page_owner is disabled\n");
                return 0;
        }

        debugfs_create_file("page_owner", 0400, NULL, NULL,
                            &proc_page_owner_operations);

        return 0;
}
late_initcall(pageowner_init)