mm: page_owner: detect page_owner recursion via task_struct
author		Sergei Trofimovich <slyfox@gentoo.org>
		Fri, 30 Apr 2021 05:55:08 +0000 (22:55 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Fri, 30 Apr 2021 18:20:36 +0000 (11:20 -0700)
Before this change, page_owner recursion was detected by fetching a
backtrace and scanning it for the current instruction pointer.
That approach has a few problems:

 - it is slightly slow, as it requires an extra backtrace and a linear
   scan of the result

 - the check comes too late when fetching the backtrace itself needs a
   memory allocation (as ia64's unwinder does), so the recursion is not
   caught in time.

To simplify recursion tracking, let's use a page_owner recursion flag
in 'struct task_struct'.

This change makes page_owner=on work on ia64 by avoiding infinite
recursion in the following call chain:
  kmalloc()
  -> __set_page_owner()
  -> save_stack()
  -> unwind() [ia64-specific]
  -> build_script()
  -> kmalloc()
  -> __set_page_owner() [we short-circuit here]
  -> save_stack()
  -> unwind() [recursion]
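
The fix boils down to a per-task guard flag set around the section that
can allocate.  A minimal, self-contained sketch of the pattern (the
in_page_owner bit matches the patch below; track_alloc() and
nested_alloc() are hypothetical stand-ins for __set_page_owner() and
the allocations it may trigger):

  /* Illustrative sketch only, not part of the patch. */
  #include <linux/sched.h>        /* for current / struct task_struct */

  static void track_alloc(void)
  {
          if (current->in_page_owner)
                  return;         /* re-entered from a nested allocation: bail out */
          current->in_page_owner = 1;

          nested_alloc();         /* may call back into track_alloc(), which now returns early */

          current->in_page_owner = 0;
  }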

Link: https://lkml.kernel.org/r/20210402115342.1463781-1-slyfox@gentoo.org
Signed-off-by: Sergei Trofimovich <slyfox@gentoo.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Ben Segall <bsegall@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Daniel Bristot de Oliveira <bristot@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/sched.h
mm/page_owner.c

diff --git a/include/linux/sched.h b/include/linux/sched.h
index d7d07aa..9c25c8e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -841,6 +841,10 @@ struct task_struct {
        /* Stalled due to lack of memory */
        unsigned                        in_memstall:1;
 #endif
+#ifdef CONFIG_PAGE_OWNER
+       /* Used by page_owner=on to detect recursion in page tracking. */
+       unsigned                        in_page_owner:1;
+#endif
 
        unsigned long                   atomic_flags; /* Flags requiring atomic access. */
 
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 5c941ca..9661d53 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -98,42 +98,30 @@ static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
        return (void *)page_ext + page_owner_ops.offset;
 }
 
-static inline bool check_recursive_alloc(unsigned long *entries,
-                                        unsigned int nr_entries,
-                                        unsigned long ip)
-{
-       unsigned int i;
-
-       for (i = 0; i < nr_entries; i++) {
-               if (entries[i] == ip)
-                       return true;
-       }
-       return false;
-}
-
 static noinline depot_stack_handle_t save_stack(gfp_t flags)
 {
        unsigned long entries[PAGE_OWNER_STACK_DEPTH];
        depot_stack_handle_t handle;
        unsigned int nr_entries;
 
-       nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
-
        /*
-        * We need to check recursion here because our request to
-        * stackdepot could trigger memory allocation to save new
-        * entry. New memory allocation would reach here and call
-        * stack_depot_save_entries() again if we don't catch it. There is
-        * still not enough memory in stackdepot so it would try to
-        * allocate memory again and loop forever.
+        * Avoid recursion.
+        *
+        * Sometimes page metadata allocation tracking requires more
+        * memory to be allocated:
+        * - when new stack trace is saved to stack depot
+        * - when backtrace itself is calculated (ia64)
         */
-       if (check_recursive_alloc(entries, nr_entries, _RET_IP_))
+       if (current->in_page_owner)
                return dummy_handle;
+       current->in_page_owner = 1;
 
+       nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
        handle = stack_depot_save(entries, nr_entries, flags);
        if (!handle)
                handle = failure_handle;
 
+       current->in_page_owner = 0;
        return handle;
 }