/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.c
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <net/page_pool.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
#include <linux/mm.h> /* for __put_page() */

#include <trace/events/page_pool.h>

#define DEFER_TIME (msecs_to_jiffies(1000))
#define DEFER_WARN_INTERVAL (60 * HZ)

static int page_pool_init(struct page_pool *pool,
			  const struct page_pool_params *params)
{
	unsigned int ring_qsize = 1024; /* Default */

	memcpy(&pool->p, params, sizeof(pool->p));

	/* Validate only known flags were used */
	if (pool->p.flags & ~(PP_FLAG_ALL))
		return -EINVAL;

	if (pool->p.pool_size)
		ring_qsize = pool->p.pool_size;

	/* Sanity limit mem that can be pinned down */
	if (ring_qsize > 32768)
		return -E2BIG;

	/* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
	 * DMA_BIDIRECTIONAL allows the page to also be used for DMA sending,
	 * which is the XDP_TX use-case.
	 */
	if (pool->p.flags & PP_FLAG_DMA_MAP) {
		if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
		    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
			return -EINVAL;
	}

	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
		/* In order to request DMA-sync-for-device the page
		 * needs to be mapped
		 */
		if (!(pool->p.flags & PP_FLAG_DMA_MAP))
			return -EINVAL;

		if (!pool->p.max_len)
			return -EINVAL;

		/* pool->p.offset has to be set according to the address
		 * offset used by the DMA engine to start copying rx data
		 */
	}

	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
		return -ENOMEM;

	atomic_set(&pool->pages_state_release_cnt, 0);

	/* A driver that calls page_pool_create() must also call page_pool_destroy() */
	refcount_set(&pool->user_cnt, 1);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		get_device(pool->p.dev);

	return 0;
}

struct page_pool *page_pool_create(const struct page_pool_params *params)
{
	struct page_pool *pool;
	int err;

	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	err = page_pool_init(pool, params);
	if (err < 0) {
		pr_warn("%s() gave up with errno %d\n", __func__, err);
		kfree(pool);
		return ERR_PTR(err);
	}

	return pool;
}
EXPORT_SYMBOL(page_pool_create);

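/* Illustrative sketch (not from the original file): how a driver's RX setup
 * path might create a pool.  The rxq fields are hypothetical driver names;
 * only struct page_pool_params and page_pool_create() come from this API.
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.order		= 0,
 *		.pool_size	= 1024,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= rxq->dev,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.max_len	= PAGE_SIZE,
 *		.offset		= 0,
 *	};
 *
 *	rxq->page_pool = page_pool_create(&pp_params);
 *	if (IS_ERR(rxq->page_pool))
 *		return PTR_ERR(rxq->page_pool);
 */
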
static void page_pool_return_page(struct page_pool *pool, struct page *page);

noinline
static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
{
	struct ptr_ring *r = &pool->ring;
	struct page *page;
	int pref_nid; /* preferred NUMA node */

	/* Quicker fallback, avoid locks when ring is empty */
	if (__ptr_ring_empty(r))
		return NULL;

	/* Softirq guarantees the CPU, and thus the NUMA node, is stable. This
	 * assumes the CPU refilling the driver RX-ring will also run RX-NAPI.
	 */
#ifdef CONFIG_NUMA
	pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
#else
	/* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
	pref_nid = numa_mem_id(); /* will be zero like page_to_nid() */
#endif

	/* Slower-path: Get pages from locked ring queue */
	spin_lock(&r->consumer_lock);

	/* Refill alloc array, but only if NUMA match */
	do {
		page = __ptr_ring_consume(r);
		if (unlikely(!page))
			break;

		if (likely(page_to_nid(page) == pref_nid)) {
			pool->alloc.cache[pool->alloc.count++] = page;
		} else {
			/* NUMA mismatch;
			 * (1) release 1 page to page-allocator and
			 * (2) break out to fall through to alloc_pages_node.
			 * This limits stress on the page buddy allocator.
			 */
			page_pool_return_page(pool, page);
			page = NULL;
			break;
		}
	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);

	/* Return last page */
	if (likely(pool->alloc.count > 0))
		page = pool->alloc.cache[--pool->alloc.count];

	spin_unlock(&r->consumer_lock);
	return page;
}

/* fast path */
static struct page *__page_pool_get_cached(struct page_pool *pool)
{
	struct page *page;

	/* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
	if (likely(pool->alloc.count)) {
		/* Fast-path */
		page = pool->alloc.cache[--pool->alloc.count];
	} else {
		page = page_pool_refill_alloc_cache(pool);
	}

	return page;
}

static void page_pool_dma_sync_for_device(struct page_pool *pool,
					  struct page *page,
					  unsigned int dma_sync_size)
{
	dma_sync_size = min(dma_sync_size, pool->p.max_len);
	dma_sync_single_range_for_device(pool->p.dev, page->dma_addr,
					 pool->p.offset, dma_sync_size,
					 pool->p.dma_dir);
}

/* slow path */
noinline
static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
						 gfp_t _gfp)
{
	struct page *page;
	gfp_t gfp = _gfp;
	dma_addr_t dma;

	/* We could always set __GFP_COMP, and avoid this branch, as
	 * prep_new_page() can handle order-0 with __GFP_COMP.
	 */
	if (pool->p.order)
		gfp |= __GFP_COMP;

	/* FUTURE development:
	 *
	 * Current slow-path essentially falls back to single page
	 * allocations, which doesn't improve performance.  This code
	 * needs bulk allocation support from the page allocator code.
	 */

	/* Cache was empty, do real allocation */
#ifdef CONFIG_NUMA
	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
#else
	page = alloc_pages(gfp, pool->p.order);
#endif
	if (!page)
		return NULL;

	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
		goto skip_dma_map;

	/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
	 * since dma_addr_t can be either 32 or 64 bits and does not always fit
	 * into page private data (i.e. a 32-bit CPU with 64-bit DMA caps).
	 * This mapping is kept for the lifetime of the page, until it leaves the pool.
	 */
	dma = dma_map_page_attrs(pool->p.dev, page, 0,
				 (PAGE_SIZE << pool->p.order),
				 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(pool->p.dev, dma)) {
		put_page(page);
		return NULL;
	}
	page->dma_addr = dma;

	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);

skip_dma_map:
	/* Track how many pages are held 'in-flight' */
	pool->pages_state_hold_cnt++;

	trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);

	/* A page that was just allocated should/must have refcnt 1. */
	return page;
}

/* For drivers using page_pool to replace alloc_pages() API calls, while
 * providing the synchronization guarantee for the allocation side.
 */
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
{
	struct page *page;

	/* Fast-path: Get a page from cache */
	page = __page_pool_get_cached(pool);
	if (page)
		return page;

	/* Slow-path: cache empty, do real allocation */
	page = __page_pool_alloc_pages_slow(pool, gfp);
	return page;
}
EXPORT_SYMBOL(page_pool_alloc_pages);

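/* Illustrative sketch (not from the original file): a hypothetical RX-ring
 * refill loop; rxq and rx_desc are driver-side names.  The DMA address used
 * here is the one stored in page->dma_addr by __page_pool_alloc_pages_slow().
 *
 *	page = page_pool_alloc_pages(rxq->page_pool,
 *				     GFP_ATOMIC | __GFP_NOWARN);
 *	if (unlikely(!page))
 *		break;
 *	rx_desc->addr = page->dma_addr + rxq->page_pool->p.offset;
 */
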
/* Calculate distance between two u32 values, valid if distance is below 2^(31)
 * https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
 */
#define _distance(a, b)	(s32)((a) - (b))

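/* Worked example of the wrap-around safety (added for illustration): with
 * release_cnt == 0xfffffffe (just before a u32 wrap) and hold_cnt == 3
 * (just after it), _distance(3, 0xfffffffe) == (s32)(3 - 0xfffffffe) == 5,
 * the true number of pages still outstanding.
 */
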
static s32 page_pool_inflight(struct page_pool *pool)
{
	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
	s32 inflight;

	inflight = _distance(hold_cnt, release_cnt);

	trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
	WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);

	return inflight;
}

/* Disconnects a page (from a page_pool).  API users can have a need
 * to disconnect a page (from a page_pool), to allow it to be used as
 * a regular page (that will eventually be returned to the normal
 * page-allocator via put_page).
 */
void page_pool_release_page(struct page_pool *pool, struct page *page)
{
	dma_addr_t dma;
	int count;

	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
		/* Always account for inflight pages, even if we didn't
		 * map them
		 */
		goto skip_dma_unmap;

	dma = page->dma_addr;

	/* Once the page is unmapped, it cannot be returned to our pool */
	dma_unmap_page_attrs(pool->p.dev, dma,
			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
			     DMA_ATTR_SKIP_CPU_SYNC);
	page->dma_addr = 0;
skip_dma_unmap:
	/* This may be the last page returned, releasing the pool, so
	 * it is not safe to reference pool afterwards.
	 */
	count = atomic_inc_return(&pool->pages_state_release_cnt);
	trace_page_pool_state_release(pool, page, count);
}
EXPORT_SYMBOL(page_pool_release_page);

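/* Illustrative sketch (not from the original file): a driver that attaches
 * the page to an skb and hands it to the network stack first gives up
 * page_pool ownership; skb, offset and len are hypothetical driver values.
 *
 *	page_pool_release_page(rxq->page_pool, page);
 *	skb_add_rx_frag(skb, 0, page, offset, len, PAGE_SIZE);
 *	napi_gro_receive(napi, skb);
 */
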
/* Return a page to the page allocator, cleaning up our state */
static void page_pool_return_page(struct page_pool *pool, struct page *page)
{
	page_pool_release_page(pool, page);

	put_page(page);
	/* An optimization would be to call __free_pages(page, pool->p.order)
	 * knowing page is not part of page-cache (thus avoiding a
	 * __page_cache_release() call).
	 */
}

static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
{
	int ret;

	/* BH protection not needed if current is serving softirq */
	if (in_serving_softirq())
		ret = ptr_ring_produce(&pool->ring, page);
	else
		ret = ptr_ring_produce_bh(&pool->ring, page);

	return (ret == 0) ? true : false;
}

/* Only allow direct recycling in special circumstances, into the
 * alloc side cache.  E.g. during RX-NAPI processing for XDP_DROP use-case.
 *
 * Caller must provide appropriate safe context.
 */
static bool page_pool_recycle_in_cache(struct page *page,
				       struct page_pool *pool)
{
	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
		return false;

	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
	pool->alloc.cache[pool->alloc.count++] = page;
	return true;
}

/* page is NOT reusable when:
 * 1) it was allocated when the system is under some pressure. (page_is_pfmemalloc)
 */
static bool pool_page_reusable(struct page_pool *pool, struct page *page)
{
	return !page_is_pfmemalloc(page);
}

/* If the page refcnt == 1, this will try to recycle the page.
 * If PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for
 * the configured size min(dma_sync_size, pool->max_len).
 * If the page refcnt != 1, then the page will be returned to the memory
 * subsystem.
 */
void page_pool_put_page(struct page_pool *pool, struct page *page,
			unsigned int dma_sync_size, bool allow_direct)
{
	/* This allocator is optimized for the XDP mode that uses
	 * one-frame-per-page, but it has fallbacks that act like the
	 * regular page allocator APIs.
	 *
	 * refcnt == 1 means page_pool owns the page, and can recycle it.
	 */
	if (likely(page_ref_count(page) == 1 &&
		   pool_page_reusable(pool, page))) {
		/* Read barrier done in page_ref_count / READ_ONCE */

		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
			page_pool_dma_sync_for_device(pool, page,
						      dma_sync_size);

		if (allow_direct && in_serving_softirq())
			if (page_pool_recycle_in_cache(page, pool))
				return;

		if (!page_pool_recycle_in_ring(pool, page)) {
			/* Cache full, fallback to free pages */
			page_pool_return_page(pool, page);
		}
		return;
	}
	/* Fallback/non-XDP mode: API user has an elevated refcnt.
	 *
	 * Many drivers split up the page into fragments, and some
	 * want to keep doing this to save memory and do refcnt based
	 * recycling. Support this use case too, to ease drivers
	 * switching between XDP/non-XDP.
	 *
	 * In case page_pool maintains the DMA mapping, the API user must
	 * still call page_pool_put_page() once.  In this elevated refcnt
	 * case, the DMA mapping is unmapped/released, as the driver is
	 * likely doing refcnt based recycle tricks, meaning another
	 * process will be invoking put_page().
	 */
	/* Do not replace this with page_pool_return_page() */
	page_pool_release_page(pool, page);
	put_page(page);
}
EXPORT_SYMBOL(page_pool_put_page);

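/* Illustrative sketch (not from the original file): recycling on XDP_DROP
 * from the driver's NAPI poll loop, where direct recycling into the alloc
 * cache is safe; rxq and len are hypothetical driver values.
 *
 *	case XDP_DROP:
 *		page_pool_put_page(rxq->page_pool, page, len, true);
 *		break;
 */
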
static void page_pool_empty_ring(struct page_pool *pool)
{
	struct page *page;

	/* Empty recycle ring */
	while ((page = ptr_ring_consume_bh(&pool->ring))) {
		/* Verify the refcnt invariant of cached pages */
		if (!(page_ref_count(page) == 1))
			pr_crit("%s() page_pool refcnt %d violation\n",
				__func__, page_ref_count(page));

		page_pool_return_page(pool, page);
	}
}

static void page_pool_free(struct page_pool *pool)
{
	if (pool->disconnect)
		pool->disconnect(pool);

	ptr_ring_cleanup(&pool->ring, NULL);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		put_device(pool->p.dev);

	kfree(pool);
}

static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
{
	struct page *page;

	if (pool->destroy_cnt)
		return;

	/* Empty alloc cache, assume caller made sure this is
	 * no longer in use, and page_pool_alloc_pages() cannot be
	 * called concurrently.
	 */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_page(pool, page);
	}
}

static void page_pool_scrub(struct page_pool *pool)
{
	page_pool_empty_alloc_cache_once(pool);
	pool->destroy_cnt++;

	/* No more consumers should exist, but producers could still
	 * be in-flight.
	 */
	page_pool_empty_ring(pool);
}

static int page_pool_release(struct page_pool *pool)
{
	int inflight;

	page_pool_scrub(pool);
	inflight = page_pool_inflight(pool);
	if (!inflight)
		page_pool_free(pool);

	return inflight;
}

static void page_pool_release_retry(struct work_struct *wq)
{
	struct delayed_work *dwq = to_delayed_work(wq);
	struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
	int inflight;

	inflight = page_pool_release(pool);
	if (!inflight)
		return;

	/* Periodic warning */
	if (time_after_eq(jiffies, pool->defer_warn)) {
		int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;

		pr_warn("%s() stalled pool shutdown %d inflight %d sec\n",
			__func__, inflight, sec);
		pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
	}

	/* Still not ready to be disconnected, retry later */
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}

void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *))
{
	refcount_inc(&pool->user_cnt);
	pool->disconnect = disconnect;
}

void page_pool_destroy(struct page_pool *pool)
{
	if (!pool)
		return;

	if (!page_pool_put(pool))
		return;

	if (!page_pool_release(pool))
		return;

	pool->defer_start = jiffies;
	pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;

	INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}
EXPORT_SYMBOL(page_pool_destroy);

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid)
{
	struct page *page;

	trace_page_pool_update_nid(pool, new_nid);
	pool->p.nid = new_nid;

	/* Flush pool alloc cache, as refill will check NUMA node */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_page(pool, page);
	}
}
EXPORT_SYMBOL(page_pool_update_nid);