/* SPDX-License-Identifier: GPL-2.0
 *
 * net/core/page_pool.c
 *      Author: Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *      Copyright (C) 2016 Red Hat, Inc.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <net/page_pool.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
#include <linux/mm.h> /* for __put_page() */

#include <trace/events/page_pool.h>

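/* Deferred pool-release handling: how often the release work item retries,
 * and how often it may emit the "stalled pool shutdown" warning (see
 * page_pool_release_retry() below).
 */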
#define DEFER_TIME (msecs_to_jiffies(1000))
#define DEFER_WARN_INTERVAL (60 * HZ)

static int page_pool_init(struct page_pool *pool,
                          const struct page_pool_params *params)
{
        unsigned int ring_qsize = 1024; /* Default */

        memcpy(&pool->p, params, sizeof(pool->p));

        /* Validate only known flags were used */
        if (pool->p.flags & ~(PP_FLAG_ALL))
                return -EINVAL;

        if (pool->p.pool_size)
                ring_qsize = pool->p.pool_size;

        /* Sanity limit mem that can be pinned down */
        if (ring_qsize > 32768)
                return -E2BIG;

        /* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
         * DMA_BIDIRECTIONAL allows the page to also be used for DMA
         * transmit, which is the XDP_TX use-case.
         */
        if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
            (pool->p.dma_dir != DMA_BIDIRECTIONAL))
                return -EINVAL;

        if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
                /* In order to request DMA-sync-for-device the page
                 * needs to be mapped
                 */
                if (!(pool->p.flags & PP_FLAG_DMA_MAP))
                        return -EINVAL;

                if (!pool->p.max_len)
                        return -EINVAL;

                /* pool->p.offset has to be set according to the address
                 * offset used by the DMA engine to start copying rx data
                 */
        }

        if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
                return -ENOMEM;

        atomic_set(&pool->pages_state_release_cnt, 0);

        /* The driver that calls page_pool_create() must also call
         * page_pool_destroy().
         */
        refcount_set(&pool->user_cnt, 1);

        if (pool->p.flags & PP_FLAG_DMA_MAP)
                get_device(pool->p.dev);

        return 0;
}

struct page_pool *page_pool_create(const struct page_pool_params *params)
{
        struct page_pool *pool;
        int err;

        pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
        if (!pool)
                return ERR_PTR(-ENOMEM);

        err = page_pool_init(pool, params);
        if (err < 0) {
                pr_warn("%s() gave up with errno %d\n", __func__, err);
                kfree(pool);
                return ERR_PTR(err);
        }

        return pool;
}
EXPORT_SYMBOL(page_pool_create);
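
/* Illustrative sketch only (not code from this file): a driver doing
 * DMA-mapped RX with device-side syncing might create a pool roughly like
 * the following.  pdev, rx_buf_len and rx_headroom are hypothetical driver
 * variables; the field names match the pool->p.* members validated in
 * page_pool_init() above.
 *
 *      struct page_pool_params pp_params = {
 *              .flags          = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *              .order          = 0,
 *              .pool_size      = 1024,
 *              .nid            = NUMA_NO_NODE,
 *              .dev            = &pdev->dev,
 *              .dma_dir        = DMA_FROM_DEVICE,
 *              .max_len        = rx_buf_len,
 *              .offset         = rx_headroom,
 *      };
 *      struct page_pool *pool = page_pool_create(&pp_params);
 *
 *      if (IS_ERR(pool))
 *              return PTR_ERR(pool);
 */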

static void __page_pool_return_page(struct page_pool *pool, struct page *page);

noinline
static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
{
        struct ptr_ring *r = &pool->ring;
        struct page *page;
        int pref_nid; /* preferred NUMA node */

        /* Quicker fallback, avoid locks when ring is empty */
        if (__ptr_ring_empty(r))
                return NULL;

        /* Softirq guarantees that the CPU, and thus the NUMA node, stays
         * stable.  This assumes that the CPU refilling the driver RX-ring
         * also runs the RX-NAPI.
         */
#ifdef CONFIG_NUMA
        pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
#else
        /* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
        pref_nid = numa_mem_id(); /* will be zero like page_to_nid() */
#endif

        /* Slower-path: Get pages from locked ring queue */
        spin_lock(&r->consumer_lock);

        /* Refill alloc array, but only if NUMA match */
        do {
                page = __ptr_ring_consume(r);
                if (unlikely(!page))
                        break;

                if (likely(page_to_nid(page) == pref_nid)) {
                        pool->alloc.cache[pool->alloc.count++] = page;
                } else {
                        /* NUMA mismatch;
                         * (1) release 1 page to the page allocator and
                         * (2) break out and fall through to alloc_pages_node().
                         * This limits stress on the page buddy allocator.
                         */
                        __page_pool_return_page(pool, page);
                        page = NULL;
                        break;
                }
        } while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);

        /* Return last page */
        if (likely(pool->alloc.count > 0))
                page = pool->alloc.cache[--pool->alloc.count];

        spin_unlock(&r->consumer_lock);
        return page;
}

/* fast path */
static struct page *__page_pool_get_cached(struct page_pool *pool)
{
        struct page *page;

        /* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
        if (likely(pool->alloc.count)) {
                /* Fast-path */
                page = pool->alloc.cache[--pool->alloc.count];
        } else {
                page = page_pool_refill_alloc_cache(pool);
        }

        return page;
}

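/* Sync the region of the page that the device may DMA into back to the
 * device: starting at pool->p.offset and clamped to pool->p.max_len, so
 * only the RX payload area configured by the driver is synced.
 */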
static void page_pool_dma_sync_for_device(struct page_pool *pool,
                                          struct page *page,
                                          unsigned int dma_sync_size)
{
        dma_sync_size = min(dma_sync_size, pool->p.max_len);
        dma_sync_single_range_for_device(pool->p.dev, page->dma_addr,
                                         pool->p.offset, dma_sync_size,
                                         pool->p.dma_dir);
}

/* slow path */
noinline
static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
                                                 gfp_t _gfp)
{
        struct page *page;
        gfp_t gfp = _gfp;
        dma_addr_t dma;

        /* We could always set __GFP_COMP, and avoid this branch, as
         * prep_new_page() can handle order-0 with __GFP_COMP.
         */
        if (pool->p.order)
                gfp |= __GFP_COMP;

        /* FUTURE development:
         *
         * The current slow-path essentially falls back to single-page
         * allocations, which doesn't improve performance.  This code
         * needs bulk allocation support from the page allocator.
         */

        /* Cache was empty, do real allocation */
#ifdef CONFIG_NUMA
        page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
#else
        page = alloc_pages(gfp, pool->p.order);
#endif
        if (!page)
                return NULL;

        if (!(pool->p.flags & PP_FLAG_DMA_MAP))
                goto skip_dma_map;

        /* Setup DMA mapping: use the 'struct page' area for storing the
         * DMA address, since dma_addr_t can be either 32 or 64 bits and does
         * not always fit into the page private data (e.g. a 32-bit CPU with
         * 64-bit DMA capabilities).
         * This mapping is kept for the lifetime of the page, until it leaves
         * the pool.
         */
        dma = dma_map_page_attrs(pool->p.dev, page, 0,
                                 (PAGE_SIZE << pool->p.order),
                                 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (dma_mapping_error(pool->p.dev, dma)) {
                put_page(page);
                return NULL;
        }
        page->dma_addr = dma;

        if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
                page_pool_dma_sync_for_device(pool, page, pool->p.max_len);

skip_dma_map:
        /* Track how many pages are held 'in-flight' */
        pool->pages_state_hold_cnt++;

        trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);

        /* A page that was just allocated should/must have refcnt 1. */
        return page;
}

/* Replacement for alloc_pages() API calls; the caller must provide the
 * synchronization guarantee for the allocation side (e.g. NAPI/softirq
 * context, see __page_pool_get_cached()).
 */
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
{
        struct page *page;

        /* Fast-path: Get a page from cache */
        page = __page_pool_get_cached(pool);
        if (page)
                return page;

        /* Slow-path: cache empty, do real allocation */
        page = __page_pool_alloc_pages_slow(pool, gfp);
        return page;
}
EXPORT_SYMBOL(page_pool_alloc_pages);
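
/* Illustrative sketch only (hypothetical driver code, not part of this
 * file): from RX-NAPI context a driver typically allocates with GFP_ATOMIC
 * and, when it drops the frame (e.g. XDP_DROP), recycles the page directly
 * into the alloc-side cache:
 *
 *      page = page_pool_alloc_pages(pool, GFP_ATOMIC | __GFP_NOWARN);
 *      if (!page)
 *              return NULL;
 *      ...
 *      __page_pool_put_page(pool, page, rx_buf_len, true);
 *
 * rx_buf_len is a made-up driver value saying how much of the page needs
 * dma-sync-for-device before reuse (it is clamped to pool->p.max_len);
 * passing allow_direct=true is only safe from softirq/NAPI context, see
 * __page_pool_put_page() below.
 */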

/* Calculate distance between two u32 values, valid if distance is below 2^(31)
 *  https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
 */
#define _distance(a, b) (s32)((a) - (b))
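/* Example: if the hold counter has wrapped around to hold_cnt == 3 while
 * release_cnt == 0xfffffffe, then _distance(3, 0xfffffffe) == 5, i.e. five
 * pages are still outstanding even though release_cnt is numerically the
 * larger value.
 */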

static s32 page_pool_inflight(struct page_pool *pool)
{
        u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
        u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
        s32 inflight;

        inflight = _distance(hold_cnt, release_cnt);

        trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
        WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);

        return inflight;
}

/* Cleanup page_pool state from page */
static void __page_pool_clean_page(struct page_pool *pool,
                                   struct page *page)
{
        dma_addr_t dma;
        int count;

        if (!(pool->p.flags & PP_FLAG_DMA_MAP))
                goto skip_dma_unmap;

        dma = page->dma_addr;
        /* DMA unmap */
        dma_unmap_page_attrs(pool->p.dev, dma,
                             PAGE_SIZE << pool->p.order, pool->p.dma_dir,
                             DMA_ATTR_SKIP_CPU_SYNC);
        page->dma_addr = 0;
skip_dma_unmap:
        /* This may be the last page returned, releasing the pool, so
         * it is not safe to reference pool afterwards.
         */
        count = atomic_inc_return(&pool->pages_state_release_cnt);
        trace_page_pool_state_release(pool, page, count);
}

/* unmap the page and clean our state */
void page_pool_unmap_page(struct page_pool *pool, struct page *page)
{
        /* When page is unmapped, this implies page will not be
         * returned to page_pool.
         */
        __page_pool_clean_page(pool, page);
}
EXPORT_SYMBOL(page_pool_unmap_page);

/* Return a page to the page allocator, cleaning up our state */
static void __page_pool_return_page(struct page_pool *pool, struct page *page)
{
        __page_pool_clean_page(pool, page);

        put_page(page);
        /* An optimization would be to call __free_pages(page, pool->p.order)
         * knowing page is not part of page-cache (thus avoiding a
         * __page_cache_release() call).
         */
}

static bool __page_pool_recycle_into_ring(struct page_pool *pool,
                                          struct page *page)
{
        int ret;
        /* BH protection not needed if current is serving softirq */
        if (in_serving_softirq())
                ret = ptr_ring_produce(&pool->ring, page);
        else
                ret = ptr_ring_produce_bh(&pool->ring, page);

        return (ret == 0) ? true : false;
}

/* Only allow direct recycling in special circumstances, into the
 * alloc side cache.  E.g. during RX-NAPI processing for XDP_DROP use-case.
 *
 * Caller must provide appropriate safe context.
 */
static bool __page_pool_recycle_direct(struct page *page,
                                       struct page_pool *pool)
{
        if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
                return false;

        /* Caller MUST have verified/know (page_ref_count(page) == 1) */
        pool->alloc.cache[pool->alloc.count++] = page;
        return true;
}

/* page is NOT reusable when:
 * 1) allocated when system is under some pressure. (page_is_pfmemalloc)
 */
static bool pool_page_reusable(struct page_pool *pool, struct page *page)
{
        return !page_is_pfmemalloc(page);
}

void __page_pool_put_page(struct page_pool *pool, struct page *page,
                          unsigned int dma_sync_size, bool allow_direct)
{
        /* This allocator is optimized for the XDP mode that uses
         * one frame per page, but it has fallbacks that act like the
         * regular page allocator APIs.
         *
         * refcnt == 1 means page_pool owns the page, and can recycle it.
         */
        if (likely(page_ref_count(page) == 1 &&
                   pool_page_reusable(pool, page))) {
                /* Read barrier done in page_ref_count / READ_ONCE */

                if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
                        page_pool_dma_sync_for_device(pool, page,
                                                      dma_sync_size);

                if (allow_direct && in_serving_softirq())
                        if (__page_pool_recycle_direct(page, pool))
                                return;

                if (!__page_pool_recycle_into_ring(pool, page)) {
                        /* Cache full, fallback to free pages */
                        __page_pool_return_page(pool, page);
                }
                return;
        }
        /* Fallback/non-XDP mode: the API user has an elevated refcnt.
         *
         * Many drivers split up the page into fragments, and some
         * want to keep doing this to save memory and do refcnt based
         * recycling. Support this use case too, to ease drivers
         * switching between XDP/non-XDP.
         *
         * In case page_pool maintains the DMA mapping, the API user must
         * call page_pool_put_page() once.  In this elevated refcnt
         * case, the DMA mapping is unmapped/released, as the driver is
         * likely doing refcnt based recycle tricks, meaning another
         * process will be invoking put_page().
         */
        __page_pool_clean_page(pool, page);
        put_page(page);
}
EXPORT_SYMBOL(__page_pool_put_page);
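
/* Illustrative note (example scenario, not code from this file): a driver
 * that splits each page into two RX buffers keeps an elevated refcount while
 * both halves are handed to the stack.  It still calls
 * __page_pool_put_page() exactly once per page; that call takes the fallback
 * path above, releasing the DMA mapping and dropping the driver's own
 * reference, while the remaining references are dropped later by the stack
 * via put_page().
 */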

static void __page_pool_empty_ring(struct page_pool *pool)
{
        struct page *page;

        /* Empty recycle ring */
        while ((page = ptr_ring_consume_bh(&pool->ring))) {
                /* Verify the refcnt invariant of cached pages */
                if (!(page_ref_count(page) == 1))
                        pr_crit("%s() page_pool refcnt %d violation\n",
                                __func__, page_ref_count(page));

                __page_pool_return_page(pool, page);
        }
}

static void page_pool_free(struct page_pool *pool)
{
        if (pool->disconnect)
                pool->disconnect(pool);

        ptr_ring_cleanup(&pool->ring, NULL);

        if (pool->p.flags & PP_FLAG_DMA_MAP)
                put_device(pool->p.dev);

        kfree(pool);
}

static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
{
        struct page *page;

        if (pool->destroy_cnt)
                return;

        /* Empty the alloc cache; assume the caller made sure it is
         * no longer in use, and that page_pool_alloc_pages() cannot be
         * called concurrently.
         */
        while (pool->alloc.count) {
                page = pool->alloc.cache[--pool->alloc.count];
                __page_pool_return_page(pool, page);
        }
}

static void page_pool_scrub(struct page_pool *pool)
{
        page_pool_empty_alloc_cache_once(pool);
        pool->destroy_cnt++;

        /* No more consumers should exist, but producers could still
         * be in-flight.
         */
        __page_pool_empty_ring(pool);
}

static int page_pool_release(struct page_pool *pool)
{
        int inflight;

        page_pool_scrub(pool);
        inflight = page_pool_inflight(pool);
        if (!inflight)
                page_pool_free(pool);

        return inflight;
}

static void page_pool_release_retry(struct work_struct *wq)
{
        struct delayed_work *dwq = to_delayed_work(wq);
        struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
        int inflight;

        inflight = page_pool_release(pool);
        if (!inflight)
                return;

        /* Periodic warning */
        if (time_after_eq(jiffies, pool->defer_warn)) {
                int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;

                pr_warn("%s() stalled pool shutdown %d inflight %d sec\n",
                        __func__, inflight, sec);
                pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
        }

        /* Still not ready to be disconnected, retry later */
        schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}

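/* Take an extra user reference on the pool and register a disconnect
 * callback; used by the XDP memory model code, so the pool is only torn
 * down once that user has let go of it as well.
 */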
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *))
{
        refcount_inc(&pool->user_cnt);
        pool->disconnect = disconnect;
}

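/* Drop a user reference; once the last one is gone, scrub the caches and
 * free the pool if no pages are in-flight.  Otherwise keep retrying from a
 * delayed work item every DEFER_TIME, warning every DEFER_WARN_INTERVAL,
 * until all outstanding pages have been returned.
 */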
void page_pool_destroy(struct page_pool *pool)
{
        if (!pool)
                return;

        if (!page_pool_put(pool))
                return;

        if (!page_pool_release(pool))
                return;

        pool->defer_start = jiffies;
        pool->defer_warn  = jiffies + DEFER_WARN_INTERVAL;

        INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
        schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}
EXPORT_SYMBOL(page_pool_destroy);

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid)
{
        struct page *page;

        trace_page_pool_update_nid(pool, new_nid);
        pool->p.nid = new_nid;

        /* Flush pool alloc cache, as refill will check NUMA node */
        while (pool->alloc.count) {
                page = pool->alloc.cache[--pool->alloc.count];
                __page_pool_return_page(pool, page);
        }
}
EXPORT_SYMBOL(page_pool_update_nid);