Merge tag 'platform-drivers-x86-v5.13-2' of git://git.kernel.org/pub/scm/linux/kernel...
[linux-2.6-microblaze.git] / mm / page_owner.c
index d15c7c4..adfabb5 100644
@@ -27,6 +27,7 @@ struct page_owner {
        depot_stack_handle_t handle;
        depot_stack_handle_t free_handle;
        u64 ts_nsec;
+       u64 free_ts_nsec;
        pid_t pid;
 };
 
@@ -41,13 +42,7 @@ static void init_early_allocated_pages(void);
 
 static int __init early_page_owner_param(char *buf)
 {
-       if (!buf)
-               return -EINVAL;
-
-       if (strcmp(buf, "on") == 0)
-               page_owner_enabled = true;
-
-       return 0;
+       return kstrtobool(buf, &page_owner_enabled);
 }
 early_param("page_owner", early_page_owner_param);
 
@@ -103,42 +98,30 @@ static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
        return (void *)page_ext + page_owner_ops.offset;
 }
 
-static inline bool check_recursive_alloc(unsigned long *entries,
-                                        unsigned int nr_entries,
-                                        unsigned long ip)
-{
-       unsigned int i;
-
-       for (i = 0; i < nr_entries; i++) {
-               if (entries[i] == ip)
-                       return true;
-       }
-       return false;
-}
-
 static noinline depot_stack_handle_t save_stack(gfp_t flags)
 {
        unsigned long entries[PAGE_OWNER_STACK_DEPTH];
        depot_stack_handle_t handle;
        unsigned int nr_entries;
 
-       nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
-
        /*
-        * We need to check recursion here because our request to
-        * stackdepot could trigger memory allocation to save new
-        * entry. New memory allocation would reach here and call
-        * stack_depot_save_entries() again if we don't catch it. There is
-        * still not enough memory in stackdepot so it would try to
-        * allocate memory again and loop forever.
+        * Avoid recursion.
+        *
+        * Sometimes page metadata allocation tracking requires more
+        * memory to be allocated:
+        * - when new stack trace is saved to stack depot
+        * - when backtrace itself is calculated (ia64)
         */
-       if (check_recursive_alloc(entries, nr_entries, _RET_IP_))
+       if (current->in_page_owner)
                return dummy_handle;
+       current->in_page_owner = 1;
 
+       nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
        handle = stack_depot_save(entries, nr_entries, flags);
        if (!handle)
                handle = failure_handle;
 
+       current->in_page_owner = 0;
        return handle;
 }
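
The old guard scanned the just-captured backtrace for _RET_IP_, which costs a linear pass on every tracked allocation and only catches recursion that re-enters through this exact return address. The per-task flag (in_page_owner, a bit added to task_struct by this series) instead catches any re-entry into save_stack(), whether it comes from stack_depot_save() allocating a new depot slab or from the unwinder itself allocating (as on ia64). A userspace analogue of the pattern, with a thread-local flag standing in for the task_struct bit (names here are illustrative, not kernel API):

```c
#include <stdio.h>
#include <stdlib.h>

static _Thread_local int in_tracker;	/* stand-in for current->in_page_owner */

static int record_stack(void);

/* Tracking an allocation may itself allocate, re-entering the tracker. */
static void *tracked_alloc(size_t n)
{
	record_stack();
	return malloc(n);
}

static int record_stack(void)
{
	int handle;

	if (in_tracker)		/* recursive entry: bail out early */
		return 0;	/* the kernel returns dummy_handle here */
	in_tracker = 1;

	/* Stand-in for stack_depot_save(), which may allocate and recurse. */
	free(tracked_alloc(16));
	handle = 42;

	in_tracker = 0;
	return handle;
}

int main(void)
{
	/* Terminates and prints 42; without the flag this would loop forever. */
	printf("handle = %d\n", record_stack());
	return 0;
}
```
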
 
@@ -146,25 +129,27 @@ void __reset_page_owner(struct page *page, unsigned int order)
 {
        int i;
        struct page_ext *page_ext;
-       depot_stack_handle_t handle = 0;
+       depot_stack_handle_t handle;
        struct page_owner *page_owner;
-
-       handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
+       u64 free_ts_nsec = local_clock();
 
        page_ext = lookup_page_ext(page);
        if (unlikely(!page_ext))
                return;
+
+       handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
        for (i = 0; i < (1 << order); i++) {
                __clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
                page_owner = get_page_owner(page_ext);
                page_owner->free_handle = handle;
+               page_owner->free_ts_nsec = free_ts_nsec;
                page_ext = page_ext_next(page_ext);
        }
 }
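
__reset_page_owner() now stamps every subpage of the freed block with a single local_clock() reading taken up front, so a report for a freed page carries both the allocation time (ts_nsec, recorded at __set_page_owner_handle() time) and the free time. Their difference is the page's lifetime; note that local_clock() is per-CPU scheduler time, so deltas taken across CPUs are only approximately comparable. A sketch of the arithmetic, using hypothetical timestamp values of the kind the report prints:

```c
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical values read from a page_owner report of a freed page. */
	uint64_t ts_nsec = 245060677897;	/* "ts" field */
	uint64_t free_ts_nsec = 245068412736;	/* "free_ts" field */

	printf("page lived %" PRIu64 " ns (%.3f ms)\n",
	       free_ts_nsec - ts_nsec,
	       (free_ts_nsec - ts_nsec) / 1e6);
	return 0;
}
```
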
 
-static inline void __set_page_owner_handle(struct page *page,
-       struct page_ext *page_ext, depot_stack_handle_t handle,
-       unsigned int order, gfp_t gfp_mask)
+static inline void __set_page_owner_handle(struct page_ext *page_ext,
+                                       depot_stack_handle_t handle,
+                                       unsigned int order, gfp_t gfp_mask)
 {
        struct page_owner *page_owner;
        int i;
@@ -194,7 +179,7 @@ noinline void __set_page_owner(struct page *page, unsigned int order,
                return;
 
        handle = save_stack(gfp_mask);
-       __set_page_owner_handle(page, page_ext, handle, order, gfp_mask);
+       __set_page_owner_handle(page_ext, handle, order, gfp_mask);
 }
 
 void __set_page_owner_migrate_reason(struct page *page, int reason)
@@ -243,11 +228,12 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
        new_page_owner->handle = old_page_owner->handle;
        new_page_owner->pid = old_page_owner->pid;
        new_page_owner->ts_nsec = old_page_owner->ts_nsec;
+       new_page_owner->free_ts_nsec = old_page_owner->free_ts_nsec;
 
        /*
         * We don't clear the bit on the oldpage as it's going to be freed
         * after migration. Until then, the info can be useful in case of
-        * a bug, and the overal stats will be off a bit only temporarily.
+        * a bug, and the overall stats will be off a bit only temporarily.
         * Also, migrate_misplaced_transhuge_page() can still fail the
         * migration and then we want the oldpage to retain the info. But
         * in that case we also don't need to explicitly clear the info from
@@ -356,10 +342,10 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
                return -ENOMEM;
 
        ret = snprintf(kbuf, count,
-                       "Page allocated via order %u, mask %#x(%pGg), pid %d, ts %llu ns\n",
+                       "Page allocated via order %u, mask %#x(%pGg), pid %d, ts %llu ns, free_ts %llu ns\n",
                        page_owner->order, page_owner->gfp_mask,
                        &page_owner->gfp_mask, page_owner->pid,
-                       page_owner->ts_nsec);
+                       page_owner->ts_nsec, page_owner->free_ts_nsec);
 
        if (ret >= count)
                goto err;
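
The `ret >= count` test relies on the snprintf() contract: the return value is the length the fully formatted string would have had, so a value at or past the buffer size signals truncation. The same idiom in standalone form (buffer size and format string here are illustrative):

```c
#include <stdio.h>

int main(void)
{
	char kbuf[32];
	int ret = snprintf(kbuf, sizeof(kbuf),
			   "Page allocated via order %u, pid %d, ts %llu ns\n",
			   3u, 1234, 245060677897ULL);

	/* ret counts the untruncated length, excluding the NUL terminator. */
	if (ret >= (int)sizeof(kbuf))
		fprintf(stderr, "truncated: needed %d bytes\n", ret + 1);
	else
		fputs(kbuf, stdout);
	return 0;
}
```
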
@@ -435,9 +421,9 @@ void __dump_page_owner(struct page *page)
        else
                pr_alert("page_owner tracks the page as freed\n");
 
-       pr_alert("page last allocated via order %u, migratetype %s, gfp_mask %#x(%pGg), pid %d, ts %llu\n",
+       pr_alert("page last allocated via order %u, migratetype %s, gfp_mask %#x(%pGg), pid %d, ts %llu, free_ts %llu\n",
                 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask,
-                page_owner->pid, page_owner->ts_nsec);
+                page_owner->pid, page_owner->ts_nsec, page_owner->free_ts_nsec);
 
        handle = READ_ONCE(page_owner->handle);
        if (!handle) {
@@ -612,7 +598,7 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
                                continue;
 
                        /* Found early allocated page */
-                       __set_page_owner_handle(page, page_ext, early_handle,
+                       __set_page_owner_handle(page_ext, early_handle,
                                                0, 0);
                        count++;
                }