/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.c
 *      Author: Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *      Copyright (C) 2016 Red Hat, Inc.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <net/page_pool.h>
#include <net/xdp.h>

#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
#include <linux/mm.h> /* for __put_page() */

#include <trace/events/page_pool.h>

#define DEFER_TIME (msecs_to_jiffies(1000))
#define DEFER_WARN_INTERVAL (60 * HZ)
static int page_pool_init(struct page_pool *pool,
                          const struct page_pool_params *params)
{
        unsigned int ring_qsize = 1024; /* Default */

        memcpy(&pool->p, params, sizeof(pool->p));

        /* Validate only known flags were used */
        if (pool->p.flags & ~(PP_FLAG_ALL))
                return -EINVAL;

        if (pool->p.pool_size)
                ring_qsize = pool->p.pool_size;

        /* Sanity limit mem that can be pinned down */
        if (ring_qsize > 32768)
                return -E2BIG;

        /* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
         * DMA_BIDIRECTIONAL allows the page to also be used for DMA sending,
         * which is the XDP_TX use-case.
         */
        if (pool->p.flags & PP_FLAG_DMA_MAP) {
                if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
                    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
                        return -EINVAL;
        }

        if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
                /* In order to request DMA-sync-for-device the page
                 * needs to be mapped
                 */
                if (!(pool->p.flags & PP_FLAG_DMA_MAP))
                        return -EINVAL;

                if (!pool->p.max_len)
                        return -EINVAL;

                /* pool->p.offset has to be set according to the address
                 * offset used by the DMA engine to start copying rx data
                 */
        }

        if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
                return -ENOMEM;

        atomic_set(&pool->pages_state_release_cnt, 0);

        /* Driver calling page_pool_create() must also call page_pool_destroy() */
        refcount_set(&pool->user_cnt, 1);

        if (pool->p.flags & PP_FLAG_DMA_MAP)
                get_device(pool->p.dev);

        return 0;
}

struct page_pool *page_pool_create(const struct page_pool_params *params)
{
        struct page_pool *pool;
        int err;

        pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
        if (!pool)
                return ERR_PTR(-ENOMEM);

        err = page_pool_init(pool, params);
        if (err < 0) {
                pr_warn("%s() gave up with errno %d\n", __func__, err);
                kfree(pool);
                return ERR_PTR(err);
        }

        return pool;
}
EXPORT_SYMBOL(page_pool_create);
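
/* Editorial example (not part of the upstream file): a minimal sketch of how
 * a driver might create a pool for one RX ring.  The ring size, the flag
 * combination and the offset/max_len values are assumptions for illustration;
 * offset/max_len are only consulted when PP_FLAG_DMA_SYNC_DEV is set.
 */
static __maybe_unused struct page_pool *
example_create_rx_pool(struct device *dev, unsigned int ring_size)
{
        struct page_pool_params pp_params = {
                .flags          = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
                .order          = 0,
                .pool_size      = ring_size,
                .nid            = NUMA_NO_NODE,
                .dev            = dev,             /* device doing the DMA mapping */
                .dma_dir        = DMA_FROM_DEVICE, /* RX only; XDP_TX needs BIDIRECTIONAL */
                .offset         = 0,               /* where HW starts writing RX data */
                .max_len        = PAGE_SIZE,       /* largest area to sync for device */
        };

        return page_pool_create(&pp_params);       /* ERR_PTR() encoded on failure */
}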

static void page_pool_return_page(struct page_pool *pool, struct page *page);

noinline
static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
{
        struct ptr_ring *r = &pool->ring;
        struct page *page;
        int pref_nid; /* preferred NUMA node */

        /* Quicker fallback, avoid locks when ring is empty */
        if (__ptr_ring_empty(r))
                return NULL;

        /* Softirq guarantees the CPU, and thus the NUMA node, is stable.
         * This assumes the CPU refilling the driver RX-ring will also run
         * the RX-NAPI.
         */
#ifdef CONFIG_NUMA
        pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
#else
        /* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
        pref_nid = numa_mem_id(); /* will be zero like page_to_nid() */
#endif

        /* Slower-path: Get pages from locked ring queue */
        spin_lock(&r->consumer_lock);

        /* Refill alloc array, but only if NUMA match */
        do {
                page = __ptr_ring_consume(r);
                if (unlikely(!page))
                        break;

                if (likely(page_to_nid(page) == pref_nid)) {
                        pool->alloc.cache[pool->alloc.count++] = page;
                } else {
                        /* NUMA mismatch;
                         * (1) release 1 page to page-allocator and
                         * (2) break out to fall through to alloc_pages_node.
                         * This limits stress on the page buddy allocator.
                         */
                        page_pool_return_page(pool, page);
                        page = NULL;
                        break;
                }
        } while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);

        /* Return last page */
        if (likely(pool->alloc.count > 0))
                page = pool->alloc.cache[--pool->alloc.count];

        spin_unlock(&r->consumer_lock);
        return page;
}

/* fast path */
static struct page *__page_pool_get_cached(struct page_pool *pool)
{
        struct page *page;

        /* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
        if (likely(pool->alloc.count)) {
                /* Fast-path */
                page = pool->alloc.cache[--pool->alloc.count];
        } else {
                page = page_pool_refill_alloc_cache(pool);
        }

        return page;
}

static void page_pool_dma_sync_for_device(struct page_pool *pool,
                                          struct page *page,
                                          unsigned int dma_sync_size)
{
        dma_addr_t dma_addr = page_pool_get_dma_addr(page);

        dma_sync_size = min(dma_sync_size, pool->p.max_len);
        dma_sync_single_range_for_device(pool->p.dev, dma_addr,
                                         pool->p.offset, dma_sync_size,
                                         pool->p.dma_dir);
}

static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
{
        dma_addr_t dma;

        /* Setup DMA mapping: use 'struct page' area for storing DMA-addr
         * since dma_addr_t can be either 32 or 64 bits and does not always fit
         * into page private data (i.e. 32-bit CPU with 64-bit DMA caps).
         * This mapping is kept for the lifetime of the page, until it leaves
         * the pool.
         */
        dma = dma_map_page_attrs(pool->p.dev, page, 0,
                                 (PAGE_SIZE << pool->p.order),
                                 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (dma_mapping_error(pool->p.dev, dma))
                return false;

        page_pool_set_dma_addr(page, dma);

        if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
                page_pool_dma_sync_for_device(pool, page, pool->p.max_len);

        return true;
}

static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
                                                 gfp_t gfp)
{
        struct page *page;

        gfp |= __GFP_COMP;
        page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
        if (unlikely(!page))
                return NULL;

        if ((pool->p.flags & PP_FLAG_DMA_MAP) &&
            unlikely(!page_pool_dma_map(pool, page))) {
                put_page(page);
                return NULL;
        }

        /* Track how many pages are held 'in-flight' */
        pool->pages_state_hold_cnt++;
        trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
        return page;
}

/* slow path */
noinline
static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
                                                 gfp_t gfp)
{
        const int bulk = PP_ALLOC_CACHE_REFILL;
        unsigned int pp_flags = pool->p.flags;
        unsigned int pp_order = pool->p.order;
        struct page *page;
        int i, nr_pages;

        /* Don't support bulk alloc for high-order pages */
        if (unlikely(pp_order))
                return __page_pool_alloc_page_order(pool, gfp);

        /* Unnecessary as alloc cache is empty, but guarantees zero count */
        if (unlikely(pool->alloc.count > 0))
                return pool->alloc.cache[--pool->alloc.count];

        /* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */
        memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);

        nr_pages = alloc_pages_bulk_array(gfp, bulk, pool->alloc.cache);
        if (unlikely(!nr_pages))
                return NULL;

        /* Pages have been filled into the alloc.cache array, but the count is
         * zero and the pages have not (possibly) been DMA mapped yet.
         */
        for (i = 0; i < nr_pages; i++) {
                page = pool->alloc.cache[i];
                if ((pp_flags & PP_FLAG_DMA_MAP) &&
                    unlikely(!page_pool_dma_map(pool, page))) {
                        put_page(page);
                        continue;
                }
                pool->alloc.cache[pool->alloc.count++] = page;
                /* Track how many pages are held 'in-flight' */
                pool->pages_state_hold_cnt++;
                trace_page_pool_state_hold(pool, page,
                                           pool->pages_state_hold_cnt);
        }

        /* Return last page */
        if (likely(pool->alloc.count > 0))
                page = pool->alloc.cache[--pool->alloc.count];
        else
                page = NULL;

        /* A page that was just allocated should/must have refcnt 1. */
        return page;
}

/* For using page_pool to replace alloc_pages() API calls, while providing a
 * synchronization guarantee for the allocation side.
 */
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
{
        struct page *page;

        /* Fast-path: Get a page from cache */
        page = __page_pool_get_cached(pool);
        if (page)
                return page;

        /* Slow-path: cache empty, do real allocation */
        page = __page_pool_alloc_pages_slow(pool, gfp);
        return page;
}
EXPORT_SYMBOL(page_pool_alloc_pages);
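
/* Editorial example (not part of the upstream file): allocating and posting
 * an RX buffer from NAPI context.  GFP_ATOMIC is required there since NAPI
 * runs in softirq; example_refill_rx_desc() and the descriptor write are
 * hypothetical driver code.
 */
static void __maybe_unused example_refill_rx_desc(struct page_pool *pool)
{
        struct page *page;
        dma_addr_t dma;

        page = page_pool_alloc_pages(pool, GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!page))
                return; /* this ring slot stays empty, retried on next poll */

        /* With PP_FLAG_DMA_MAP the pool has already mapped the page */
        dma = page_pool_get_dma_addr(page) + pool->p.offset;

        /* ... the driver would now write 'dma' into its HW RX descriptor ... */
        (void)dma;
}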

/* Calculate distance between two u32 values, valid if distance is below 2^(31)
 *  https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
 */
#define _distance(a, b) (s32)((a) - (b))
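
/* Worked example (editorial note): the subtraction stays correct across u32
 * wraparound, e.g. hold_cnt = 3 (after wrapping) and release_cnt = 0xfffffffe
 * gives _distance(3, 0xfffffffe) = 5, i.e. five pages are still outstanding
 * even though hold_cnt is numerically smaller than release_cnt.
 */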

static s32 page_pool_inflight(struct page_pool *pool)
{
        u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
        u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
        s32 inflight;

        inflight = _distance(hold_cnt, release_cnt);

        trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
        WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);

        return inflight;
}

/* Disconnects a page (from a page_pool).  API users can have a need
 * to disconnect a page, to allow it to be used as a regular page
 * (that will eventually be returned to the normal page-allocator via
 * put_page).
 */
void page_pool_release_page(struct page_pool *pool, struct page *page)
{
        dma_addr_t dma;
        int count;

        if (!(pool->p.flags & PP_FLAG_DMA_MAP))
                /* Always account for inflight pages, even if we didn't
                 * map them
                 */
                goto skip_dma_unmap;

        dma = page_pool_get_dma_addr(page);

        /* When page is unmapped, it cannot be returned to our pool */
        dma_unmap_page_attrs(pool->p.dev, dma,
                             PAGE_SIZE << pool->p.order, pool->p.dma_dir,
                             DMA_ATTR_SKIP_CPU_SYNC);
        page_pool_set_dma_addr(page, 0);
skip_dma_unmap:
        /* This may be the last page returned, releasing the pool, so
         * it is not safe to reference pool afterwards.
         */
        count = atomic_inc_return(&pool->pages_state_release_cnt);
        trace_page_pool_state_release(pool, page, count);
}
EXPORT_SYMBOL(page_pool_release_page);
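
/* Editorial example (not part of the upstream file): once a driver hands a
 * page_pool page to a consumer the pool has no control over (e.g. the network
 * stack via an SKB), it must first disconnect the page from the pool so the
 * in-flight accounting and the DMA mapping are dropped.  A minimal sketch;
 * example_rx_to_stack() and the omitted SKB construction are hypothetical
 * driver code.
 */
static void __maybe_unused example_rx_to_stack(struct page_pool *pool,
                                               struct page *page)
{
        /* Unmap and stop accounting for the page before it escapes the pool;
         * from here on an ordinary put_page() frees it.
         */
        page_pool_release_page(pool, page);

        /* ... the driver would now attach the page to an SKB and hand it to
         * napi_gro_receive() / netif_receive_skb() ...
         */
}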

/* Return a page to the page allocator, cleaning up our state */
static void page_pool_return_page(struct page_pool *pool, struct page *page)
{
        page_pool_release_page(pool, page);

        put_page(page);
        /* An optimization would be to call __free_pages(page, pool->p.order)
         * knowing page is not part of page-cache (thus avoiding a
         * __page_cache_release() call).
         */
}

static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
{
        int ret;
        /* BH protection not needed if current is serving softirq */
        if (in_serving_softirq())
                ret = ptr_ring_produce(&pool->ring, page);
        else
                ret = ptr_ring_produce_bh(&pool->ring, page);

        return (ret == 0) ? true : false;
}

/* Only allow direct recycling in special circumstances, into the
 * alloc side cache.  E.g. during RX-NAPI processing for XDP_DROP use-case.
 *
 * Caller must provide appropriate safe context.
 */
static bool page_pool_recycle_in_cache(struct page *page,
                                       struct page_pool *pool)
{
        if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
                return false;

        /* Caller MUST have verified/know (page_ref_count(page) == 1) */
        pool->alloc.cache[pool->alloc.count++] = page;
        return true;
}

/* If the page refcnt == 1, this will try to recycle the page.
 * If PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for
 * the configured size min(dma_sync_size, pool->max_len).
 * If the page refcnt != 1, then the page will be returned to the memory
 * subsystem.
 */
static __always_inline struct page *
__page_pool_put_page(struct page_pool *pool, struct page *page,
                     unsigned int dma_sync_size, bool allow_direct)
{
        /* This allocator is optimized for the XDP mode that uses
         * one-frame-per-page, but has fallbacks that act like the
         * regular page allocator APIs.
         *
         * refcnt == 1 means page_pool owns page, and can recycle it.
         *
         * A page is NOT reusable when it was allocated while the system
         * was under some memory pressure. (page_is_pfmemalloc)
         */
        if (likely(page_ref_count(page) == 1 && !page_is_pfmemalloc(page))) {
                /* Read barrier done in page_ref_count / READ_ONCE */

                if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
                        page_pool_dma_sync_for_device(pool, page,
                                                      dma_sync_size);

                if (allow_direct && in_serving_softirq() &&
                    page_pool_recycle_in_cache(page, pool))
                        return NULL;

                /* Page found as candidate for recycling */
                return page;
        }
        /* Fallback/non-XDP mode: API user has an elevated refcnt.
         *
         * Many drivers split up the page into fragments, and some
         * want to keep doing this to save memory and do refcnt based
         * recycling. Support this use case too, to ease drivers
         * switching between XDP/non-XDP.
         *
         * In case page_pool maintains the DMA mapping, the API user must
         * call page_pool_put_page() once.  In this elevated refcnt
         * case, the DMA mapping is unmapped/released, as the driver is
         * likely doing refcnt based recycle tricks, meaning another
         * process will be invoking put_page().
         */
        /* Do not replace this with page_pool_return_page() */
        page_pool_release_page(pool, page);
        put_page(page);

        return NULL;
}

void page_pool_put_page(struct page_pool *pool, struct page *page,
                        unsigned int dma_sync_size, bool allow_direct)
{
        page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
        if (page && !page_pool_recycle_in_ring(pool, page)) {
                /* Cache full, fallback to free pages */
                page_pool_return_page(pool, page);
        }
}
EXPORT_SYMBOL(page_pool_put_page);
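
/* Editorial example (not part of the upstream file): the typical recycle call
 * a driver makes.  From RX-NAPI softirq (e.g. on an XDP_DROP verdict) it may
 * pass allow_direct = true so the page goes straight back into the lockless
 * alloc cache; outside that context allow_direct must be false.  Passing -1
 * as dma_sync_size requests a sync of the full pool->p.max_len area.
 */
static void __maybe_unused example_recycle(struct page_pool *pool,
                                           struct page *page,
                                           bool from_rx_napi)
{
        page_pool_put_page(pool, page, -1, from_rx_napi);
}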

/* Caller must not use data area after call, as this function overwrites it */
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
                             int count)
{
        int i, bulk_len = 0;

        for (i = 0; i < count; i++) {
                struct page *page = virt_to_head_page(data[i]);

                page = __page_pool_put_page(pool, page, -1, false);
                /* Approved for bulk recycling in ptr_ring cache */
                if (page)
                        data[bulk_len++] = page;
        }

        if (unlikely(!bulk_len))
                return;

        /* Bulk producer into ptr_ring page_pool cache */
        page_pool_ring_lock(pool);
        for (i = 0; i < bulk_len; i++) {
                if (__ptr_ring_produce(&pool->ring, data[i]))
                        break; /* ring full */
        }
        page_pool_ring_unlock(pool);

        /* Hopefully all pages were returned into the ptr_ring */
        if (likely(i == bulk_len))
                return;

        /* ptr_ring cache full, free remaining pages outside producer lock
         * since put_page() with refcnt == 1 can be an expensive operation
         */
        for (; i < bulk_len; i++)
                page_pool_return_page(pool, data[i]);
}
EXPORT_SYMBOL(page_pool_put_page_bulk);
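
/* Editorial example (not part of the upstream file): bulk-recycling a batch
 * of buffers by their kernel virtual addresses, roughly what the XDP
 * frame-return path does when freeing many frames at once.  The frames/n
 * batch is hypothetical; note the array is overwritten by the call.
 */
static void __maybe_unused example_bulk_return(struct page_pool *pool,
                                               void **frames, int n)
{
        /* Every entry must point into a page owned by this page_pool */
        page_pool_put_page_bulk(pool, frames, n);
}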

static void page_pool_empty_ring(struct page_pool *pool)
{
        struct page *page;

        /* Empty recycle ring */
        while ((page = ptr_ring_consume_bh(&pool->ring))) {
                /* Verify the refcnt invariant of cached pages */
                if (!(page_ref_count(page) == 1))
                        pr_crit("%s() page_pool refcnt %d violation\n",
                                __func__, page_ref_count(page));

                page_pool_return_page(pool, page);
        }
}

static void page_pool_free(struct page_pool *pool)
{
        if (pool->disconnect)
                pool->disconnect(pool);

        ptr_ring_cleanup(&pool->ring, NULL);

        if (pool->p.flags & PP_FLAG_DMA_MAP)
                put_device(pool->p.dev);

        kfree(pool);
}

static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
{
        struct page *page;

        if (pool->destroy_cnt)
                return;

        /* Empty alloc cache, assume caller made sure this is
         * no longer in use, and page_pool_alloc_pages() cannot be
         * called concurrently.
         */
        while (pool->alloc.count) {
                page = pool->alloc.cache[--pool->alloc.count];
                page_pool_return_page(pool, page);
        }
}

static void page_pool_scrub(struct page_pool *pool)
{
        page_pool_empty_alloc_cache_once(pool);
        pool->destroy_cnt++;

        /* No more consumers should exist, but producers could still
         * be in-flight.
         */
        page_pool_empty_ring(pool);
}

static int page_pool_release(struct page_pool *pool)
{
        int inflight;

        page_pool_scrub(pool);
        inflight = page_pool_inflight(pool);
        if (!inflight)
                page_pool_free(pool);

        return inflight;
}

static void page_pool_release_retry(struct work_struct *wq)
{
        struct delayed_work *dwq = to_delayed_work(wq);
        struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
        int inflight;

        inflight = page_pool_release(pool);
        if (!inflight)
                return;

        /* Periodic warning */
        if (time_after_eq(jiffies, pool->defer_warn)) {
                int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;

                pr_warn("%s() stalled pool shutdown %d inflight %d sec\n",
                        __func__, inflight, sec);
                pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
        }

        /* Still not ready to be disconnected, retry later */
        schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}

void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *))
{
        refcount_inc(&pool->user_cnt);
        pool->disconnect = disconnect;
}

void page_pool_destroy(struct page_pool *pool)
{
        if (!pool)
                return;

        if (!page_pool_put(pool))
                return;

        if (!page_pool_release(pool))
                return;

        pool->defer_start = jiffies;
        pool->defer_warn  = jiffies + DEFER_WARN_INTERVAL;

        INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
        schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}
EXPORT_SYMBOL(page_pool_destroy);
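
/* Editorial example (not part of the upstream file): typical shutdown order
 * in a driver.  Every page the driver still holds (e.g. sitting in the RX
 * ring) must be returned first; otherwise page_pool_destroy() keeps the pool
 * alive, retries the release from delayed work and warns about inflight
 * pages every 60 seconds.  The rx_pages array is hypothetical driver state.
 */
static void __maybe_unused example_teardown(struct page_pool *pool,
                                            struct page **rx_pages,
                                            unsigned int n)
{
        unsigned int i;

        /* Give back every buffer the driver still owns */
        for (i = 0; i < n; i++)
                if (rx_pages[i])
                        page_pool_put_page(pool, rx_pages[i], -1, false);

        /* Frees immediately when nothing is inflight, defers otherwise */
        page_pool_destroy(pool);
}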

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid)
{
        struct page *page;

        trace_page_pool_update_nid(pool, new_nid);
        pool->p.nid = new_nid;

        /* Flush pool alloc cache, as refill will check NUMA node */
        while (pool->alloc.count) {
                page = pool->alloc.cache[--pool->alloc.count];
                page_pool_return_page(pool, page);
        }
}
EXPORT_SYMBOL(page_pool_update_nid);
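
/* Editorial example (not part of the upstream file): a driver whose IRQ/NAPI
 * affinity migrated can re-home the pool from its poll loop so refills prefer
 * the now-local node.  The check-then-update below is roughly what the
 * page_pool_nid_changed() helper in <net/page_pool.h> wraps (an assumption
 * about that helper; the direct call below is what this file exports).
 */
static void __maybe_unused example_napi_nid_check(struct page_pool *pool)
{
        int nid = numa_mem_id();        /* node of the CPU running NAPI */

        if (pool->p.nid != nid)
                page_pool_update_nid(pool, nid);
}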