mm: add a signature in struct page
author    Matteo Croce <mcroce@microsoft.com>
Mon, 7 Jun 2021 19:02:36 +0000 (21:02 +0200)
committer David S. Miller <davem@davemloft.net>
Mon, 7 Jun 2021 21:11:47 +0000 (14:11 -0700)
This is needed by the page_pool to avoid recycling a page not allocated
via page_pool.

The signature field (page->pp_magic) is aliased to page->lru.next and
page->compound_head, but it can't be set by mistake because the
signature value is a bad pointer, and can't trigger a false positive
in PageTail() because the last bit (bit 0) is 0.

Co-developed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Matteo Croce <mcroce@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/linux/mm.h
include/linux/mm_types.h
include/linux/poison.h
net/core/page_pool.c

index c274f75..a0434e8 100644 (file)
@@ -1668,10 +1668,11 @@ struct address_space *page_mapping(struct page *page);
 static inline bool page_is_pfmemalloc(const struct page *page)
 {
        /*
-        * Page index cannot be this large so this must be
-        * a pfmemalloc page.
+        * lru.next has bit 1 set if the page is allocated from the
+        * pfmemalloc reserves.  Callers may simply overwrite it if
+        * they do not need to preserve that information.
         */
-       return page->index == -1UL;
+       return (uintptr_t)page->lru.next & BIT(1);
 }
 
 /*
@@ -1680,12 +1681,12 @@ static inline bool page_is_pfmemalloc(const struct page *page)
  */
 static inline void set_page_pfmemalloc(struct page *page)
 {
-       page->index = -1UL;
+       page->lru.next = (void *)BIT(1);
 }
 
 static inline void clear_page_pfmemalloc(struct page *page)
 {
-       page->index = 0;
+       page->lru.next = NULL;
 }
 
 /*
index 5aacc1c..ed6862e 100644 (file)
@@ -96,6 +96,13 @@ struct page {
                        unsigned long private;
                };
                struct {        /* page_pool used by netstack */
+                       /**
+                        * @pp_magic: magic value to avoid recycling non
+                        * page_pool allocated pages.
+                        */
+                       unsigned long pp_magic;
+                       struct page_pool *pp;
+                       unsigned long _pp_mapping_pad;
                        /**
                         * @dma_addr: might require a 64-bit value on
                         * 32-bit architectures.
index aff1c92..d62ef5a 100644 (file)
@@ -78,4 +78,7 @@
 /********** security/ **********/
 #define KEY_DESTROY            0xbd
 
+/********** net/core/page_pool.c **********/
+#define PP_SIGNATURE           (0x40 + POISON_POINTER_DELTA)
+
 #endif
index 3c4c4c7..e1321bc 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/page-flags.h>
 #include <linux/mm.h> /* for __put_page() */
+#include <linux/poison.h>
 
 #include <trace/events/page_pool.h>
 
@@ -221,6 +222,8 @@ static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
                return NULL;
        }
 
+       page->pp_magic |= PP_SIGNATURE;
+
        /* Track how many pages are held 'in-flight' */
        pool->pages_state_hold_cnt++;
        trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
@@ -263,6 +266,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
                        put_page(page);
                        continue;
                }
+               page->pp_magic |= PP_SIGNATURE;
                pool->alloc.cache[pool->alloc.count++] = page;
                /* Track how many pages are held 'in-flight' */
                pool->pages_state_hold_cnt++;
@@ -341,6 +345,8 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
                             DMA_ATTR_SKIP_CPU_SYNC);
        page_pool_set_dma_addr(page, 0);
 skip_dma_unmap:
+       page->pp_magic = 0;
+
        /* This may be the last page returned, releasing the pool, so
         * it is not safe to reference pool afterwards.
         */