mm, page_owner: decouple freeing stack trace from debug_pagealloc
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 813fcb7..de1916a 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -24,9 +24,10 @@ struct page_owner {
        short last_migrate_reason;
        gfp_t gfp_mask;
        depot_stack_handle_t handle;
+       depot_stack_handle_t free_handle;
 };
 
-static bool page_owner_disabled = true;
+static bool page_owner_enabled = false;
 DEFINE_STATIC_KEY_FALSE(page_owner_inited);
 
 static depot_stack_handle_t dummy_handle;
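
For reference, the per-page metadata kept by page_owner ends up roughly as below; this is a sketch assuming the order field just above the shown context is an unsigned short. The new free_handle stores the stack depot handle recorded at free time, parallel to handle for the allocation:

    struct page_owner {
            unsigned short order;
            short last_migrate_reason;
            gfp_t gfp_mask;
            depot_stack_handle_t handle;            /* allocation stack */
            depot_stack_handle_t free_handle;       /* freeing stack (new) */
    };
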
@@ -41,7 +42,7 @@ static int __init early_page_owner_param(char *buf)
                return -EINVAL;
 
        if (strcmp(buf, "on") == 0)
-               page_owner_disabled = false;
+               page_owner_enabled = true;
 
        return 0;
 }
@@ -49,10 +50,7 @@ early_param("page_owner", early_page_owner_param);
 
 static bool need_page_owner(void)
 {
-       if (page_owner_disabled)
-               return false;
-
-       return true;
+       return page_owner_enabled;
 }
 
 static __always_inline depot_stack_handle_t create_dummy_stack(void)
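
need_page_owner() is the ->need callback that page_ext uses to decide whether to reserve space for page_owner data. The hookup lives elsewhere in this file and is untouched by the patch; roughly:

    /* sketch of the page_ext registration, not part of this diff */
    struct page_ext_operations page_owner_ops = {
            .size = sizeof(struct page_owner),
            .need = need_page_owner,
            .init = init_page_owner,
    };
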
@@ -81,7 +79,7 @@ static noinline void register_early_stack(void)
 
 static void init_page_owner(void)
 {
-       if (page_owner_disabled)
+       if (!page_owner_enabled)
                return;
 
        register_dummy_stack();
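
init_page_owner() is where the page_owner_inited static key from above gets enabled once the boot parameter asked for it; the rest of the function, truncated by this hunk, looks roughly like:

    static void init_page_owner(void)
    {
            if (!page_owner_enabled)
                    return;

            /* pre-save fallback stacks so later lookups never fail hard */
            register_dummy_stack();
            register_failure_stack();
            register_early_stack();
            static_branch_enable(&page_owner_inited);
            /* record owners for pages already handed out during early boot */
            init_early_allocated_pages();
    }
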
@@ -102,19 +100,6 @@ static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
        return (void *)page_ext + page_owner_ops.offset;
 }
 
-void __reset_page_owner(struct page *page, unsigned int order)
-{
-       int i;
-       struct page_ext *page_ext;
-
-       for (i = 0; i < (1 << order); i++) {
-               page_ext = lookup_page_ext(page + i);
-               if (unlikely(!page_ext))
-                       continue;
-               __clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
-       }
-}
-
 static inline bool check_recursive_alloc(unsigned long *entries,
                                         unsigned int nr_entries,
                                         unsigned long ip)
@@ -154,6 +139,26 @@ static noinline depot_stack_handle_t save_stack(gfp_t flags)
        return handle;
 }
 
+void __reset_page_owner(struct page *page, unsigned int order)
+{
+       int i;
+       struct page_ext *page_ext;
+       depot_stack_handle_t handle = 0;
+       struct page_owner *page_owner;
+
+       handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
+
+       page_ext = lookup_page_ext(page);
+       if (unlikely(!page_ext))
+               return;
+       for (i = 0; i < (1 << order); i++) {
+               __clear_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
+               page_owner = get_page_owner(page_ext);
+               page_owner->free_handle = handle;
+               page_ext = page_ext_next(page_ext);
+       }
+}
+
 static inline void __set_page_owner_handle(struct page *page,
        struct page_ext *page_ext, depot_stack_handle_t handle,
        unsigned int order, gfp_t gfp_mask)
@@ -168,8 +173,9 @@ static inline void __set_page_owner_handle(struct page *page,
                page_owner->gfp_mask = gfp_mask;
                page_owner->last_migrate_reason = -1;
                __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
+               __set_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
 
-               page_ext = lookup_page_ext(page + i);
+               page_ext = page_ext_next(page_ext);
        }
 }
 
@@ -207,12 +213,10 @@ void __split_page_owner(struct page *page, unsigned int order)
        if (unlikely(!page_ext))
                return;
 
-       page_owner = get_page_owner(page_ext);
-       page_owner->order = 0;
-       for (i = 1; i < (1 << order); i++) {
-               page_ext = lookup_page_ext(page + i);
+       for (i = 0; i < (1 << order); i++) {
                page_owner = get_page_owner(page_ext);
                page_owner->order = 0;
+               page_ext = page_ext_next(page_ext);
        }
 }
 
@@ -243,6 +247,7 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
         * the new page, which will be freed.
         */
        __set_bit(PAGE_EXT_OWNER, &new_ext->flags);
+       __set_bit(PAGE_EXT_OWNER_ACTIVE, &new_ext->flags);
 }
 
 void pagetypeinfo_showmixedcount_print(struct seq_file *m,
@@ -302,7 +307,7 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
                        if (unlikely(!page_ext))
                                continue;
 
-                       if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
+                       if (!test_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags))
                                continue;
 
                        page_owner = get_page_owner(page_ext);
@@ -413,20 +418,34 @@ void __dump_page_owner(struct page *page)
        mt = gfpflags_to_migratetype(gfp_mask);
 
        if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
-               pr_alert("page_owner info is not active (free page?)\n");
+               pr_alert("page_owner info is not present (never set?)\n");
                return;
        }
 
+       if (test_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags))
+               pr_alert("page_owner tracks the page as allocated\n");
+       else
+               pr_alert("page_owner tracks the page as freed\n");
+
+       pr_alert("page last allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
+                page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
+
        handle = READ_ONCE(page_owner->handle);
        if (!handle) {
-               pr_alert("page_owner info is not active (free page?)\n");
-               return;
+               pr_alert("page_owner allocation stack trace missing\n");
+       } else {
+               nr_entries = stack_depot_fetch(handle, &entries);
+               stack_trace_print(entries, nr_entries, 0);
        }
 
-       nr_entries = stack_depot_fetch(handle, &entries);
-       pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
-                page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
-       stack_trace_print(entries, nr_entries, 0);
+       handle = READ_ONCE(page_owner->free_handle);
+       if (!handle) {
+               pr_alert("page_owner free stack trace missing\n");
+       } else {
+               nr_entries = stack_depot_fetch(handle, &entries);
+               pr_alert("page last free stack trace:\n");
+               stack_trace_print(entries, nr_entries, 0);
+       }
 
        if (page_owner->last_migrate_reason != -1)
                pr_alert("page has been migrated, last migrate reason: %s\n",
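
__dump_page_owner() is typically reached from dump_page() when something goes wrong with a page (bad page state, VM_BUG_ON_PAGE and friends), so with this patch such reports show both the allocation stack and the last freeing stack. The wiring is roughly:

    /* mm/debug.c */
    void dump_page(struct page *page, const char *reason)
    {
            __dump_page(page, reason);
            dump_page_owner(page);
    }

    /* include/linux/page_owner.h */
    static inline void dump_page_owner(struct page *page)
    {
            if (static_branch_unlikely(&page_owner_inited))
                    __dump_page_owner(page);
    }
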
@@ -489,6 +508,13 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
                if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                        continue;
 
+               /*
+                * Although we do have the info about past allocation of free
+                * pages, it's not relevant for current memory usage.
+                */
+               if (!test_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags))
+                       continue;
+
                page_owner = get_page_owner(page_ext);
 
                /*
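
read_page_owner() backs the page_owner file in debugfs, which walks all pages and prints one record per currently allocated page; this hunk makes it skip pages whose owner info is now retained only because the freeing stack is kept. A sketch of how the file is registered at late_initcall time, assuming the usual proc_page_owner_operations fops in this file:

    static int __init pageowner_init(void)
    {
            if (!static_branch_unlikely(&page_owner_inited)) {
                    pr_info("page_owner is disabled\n");
                    return 0;
            }

            /* userspace reads this as /sys/kernel/debug/page_owner */
            debugfs_create_file("page_owner", 0400, NULL, NULL,
                                &proc_page_owner_operations);

            return 0;
    }
    late_initcall(pageowner_init)

Documentation/vm/page_owner.rst describes reading that file and post-processing the output with the page_owner_sort tool.
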