diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 3dd62dd..a408240 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
                                        * Please note DMA-sync-for-CPU is still
                                        * device driver responsibility
                                        */
-#define PP_FLAG_ALL            (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV)
+#define PP_FLAG_PAGE_FRAG      BIT(2) /* for page frag feature */
+#define PP_FLAG_ALL            (PP_FLAG_DMA_MAP |\
+                                PP_FLAG_DMA_SYNC_DEV |\
+                                PP_FLAG_PAGE_FRAG)
 
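
The new PP_FLAG_PAGE_FRAG bit is requested at pool creation time, alongside the existing flags. Below is a minimal sketch of how a driver might opt in, assuming the v5.15-era struct page_pool_params layout; the function name and pool sizing are illustrative, not part of this patch:

#include <net/page_pool.h>

/* Hypothetical driver init: request DMA mapping plus the new
 * page-frag feature in a single pool. All names other than the
 * page_pool API itself are placeholders.
 */
static struct page_pool *example_create_frag_pool(struct device *dev)
{
	struct page_pool_params pp_params = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_PAGE_FRAG,
		.order		= 0,
		.pool_size	= 256,
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
	};

	return page_pool_create(&pp_params);
}
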
 /*
  * Fast allocation side cache array/stack
@@ -88,6 +91,9 @@ struct page_pool {
        unsigned long defer_warn;
 
        u32 pages_state_hold_cnt;
+       unsigned int frag_offset;
+       struct page *frag_page;
+       long frag_users;
 
        /*
         * Data structure for allocation side
@@ -137,6 +143,18 @@ static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
        return page_pool_alloc_pages(pool, gfp);
 }
 
+struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
+                                 unsigned int size, gfp_t gfp);
+
+static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
+                                                   unsigned int *offset,
+                                                   unsigned int size)
+{
+       gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+       return page_pool_alloc_frag(pool, offset, size, gfp);
+}
+
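
page_pool_dev_alloc_frag() returns the backing page and writes the fragment's offset within that page through *offset. A minimal usage sketch, assuming the pool was created with PP_FLAG_DMA_MAP and PP_FLAG_PAGE_FRAG and that size fits within one page; the helper name is illustrative:

#include <net/page_pool.h>

/* Hypothetical rx refill path: carve a size-byte fragment out of a
 * pool-managed page and derive both its CPU and DMA addresses.
 */
static void *example_alloc_rx_frag(struct page_pool *pool,
				   unsigned int size, dma_addr_t *dma)
{
	unsigned int offset;
	struct page *page;

	page = page_pool_dev_alloc_frag(pool, &offset, size);
	if (!page)
		return NULL;

	*dma = page_pool_get_dma_addr(page) + offset;
	return page_address(page) + offset;
}
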
 /* get the stored dma direction. A driver might decide to treat this locally and
  * avoid the extra cache line from page_pool to determine the direction
  */
@@ -198,19 +216,48 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
        page_pool_put_full_page(pool, page, true);
 }
 
+#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT        \
+               (sizeof(dma_addr_t) > sizeof(unsigned long))
+
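
PAGE_POOL_DMA_USE_PP_FRAG_COUNT is true on configurations where dma_addr_t is wider than unsigned long (typically 32-bit kernels with 64-bit DMA). There, struct page has no room for both the upper DMA address bits and pp_frag_count, so the same word is reused and the frag feature is unavailable. A sketch of the resulting guard, assuming it mirrors the check this series adds on the page_pool_init() side; the function name is illustrative:

/* Reject the frag feature where page->pp_frag_count must double as
 * dma_addr_upper (dma_addr_t wider than unsigned long).
 */
static int example_validate_frag_flags(const struct page_pool_params *p)
{
	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT &&
	    (p->flags & PP_FLAG_PAGE_FRAG))
		return -EINVAL;

	return 0;
}
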
 static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
 {
-       dma_addr_t ret = page->dma_addr[0];
-       if (sizeof(dma_addr_t) > sizeof(unsigned long))
-               ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16;
+       dma_addr_t ret = page->dma_addr;
+
+       if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
+               ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16;
+
        return ret;
 }
 
 static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
 {
-       page->dma_addr[0] = addr;
-       if (sizeof(dma_addr_t) > sizeof(unsigned long))
-               page->dma_addr[1] = upper_32_bits(addr);
+       page->dma_addr = addr;
+       if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
+               page->dma_addr_upper = upper_32_bits(addr);
+}
+
+static inline void page_pool_set_frag_count(struct page *page, long nr)
+{
+       atomic_long_set(&page->pp_frag_count, nr);
+}
+
+static inline long page_pool_atomic_sub_frag_count_return(struct page *page,
+                                                         long nr)
+{
+       long ret;
+
+       /* As suggested by Alexander, atomic_long_read() may cover up the
+        * reference count errors, so avoid calling atomic_long_read() in
+        * the cases of freeing or draining the page_frags, where we would
+        * not expect it to match or that are slowpath anyway.
+        */
+       if (__builtin_constant_p(nr) &&
+           atomic_long_read(&page->pp_frag_count) == nr)
+               return 0;
+
+       ret = atomic_long_sub_return(nr, &page->pp_frag_count);
+       WARN_ON(ret < 0);
+       return ret;
 }
 
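
Taken together, the two helpers implement a biased reference scheme: the pool charges a fresh frag page with one up-front count covering the fragments it expects to hand out, and each fragment release subtracts from it; a zero return tells the caller it was the last user. A lifecycle sketch with illustrative values, assuming a softirq context so page_pool_recycle_direct() is safe to call; none of this is lifted from the patch itself:

/* Lifecycle sketch with illustrative values. */
static void example_frag_lifecycle(struct page_pool *pool,
				   struct page *page)
{
	/* Pool side: bias a fresh page for, say, four fragments. */
	page_pool_set_frag_count(page, 4);

	/* User side: each fragment release drops one reference; a zero
	 * return (including via the constant-nr fast path above) means
	 * the caller was the last user and the page may be recycled.
	 */
	if (page_pool_atomic_sub_frag_count_return(page, 1) == 0)
		page_pool_recycle_direct(pool, page);
}
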
 static inline bool is_page_pool_compiled_in(void)
@@ -253,11 +300,4 @@ static inline void page_pool_ring_unlock(struct page_pool *pool)
                spin_unlock_bh(&pool->ring.producer_lock);
 }
 
-/* Store mem_info on struct page and use it while recycling skb frags */
-static inline
-void page_pool_store_mem_info(struct page *page, struct page_pool *pp)
-{
-       page->pp = pp;
-}
-
 #endif /* _NET_PAGE_POOL_H */