net/xdp/xsk_queue.c
// SPDX-License-Identifier: GPL-2.0
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/overflow.h>

#include "xsk_queue.h"

/* Record the backing UMEM's size and chunk mask on an already-created
 * queue.  Tolerates a NULL queue so callers need not check first.
 */
void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask)
{
        if (!q)
                return;

        q->size = size;
        q->chunk_mask = chunk_mask;
}
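
/*
 * Illustrative caller (a sketch, not part of this file): the bind/
 * registration path applies the UMEM geometry to both the fill and
 * completion rings, roughly:
 *
 *	xskq_set_umem(umem->fq, umem->size, umem->chunk_mask);
 *	xskq_set_umem(umem->cq, umem->size, umem->chunk_mask);
 */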

static size_t xskq_get_ring_size(struct xsk_queue *q, bool umem_queue)
{
        struct xdp_umem_ring *umem_ring;
        struct xdp_rxtx_ring *rxtx_ring;

        if (umem_queue)
                return struct_size(umem_ring, desc, q->nentries);
        return struct_size(rxtx_ring, desc, q->nentries);
}
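
/*
 * Note: struct_size() inspects only the pointee type, so the two
 * uninitialized pointers above are never dereferenced.  It computes
 * roughly
 *
 *	sizeof(*ring) + nentries * sizeof(ring->desc[0])
 *
 * except that the arithmetic saturates to SIZE_MAX on overflow, which
 * makes the subsequent allocation fail instead of being undersized.
 */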

struct xsk_queue *xskq_create(u32 nentries, bool umem_queue)
{
        struct xsk_queue *q;
        gfp_t gfp_flags;
        size_t size;

        q = kzalloc(sizeof(*q), GFP_KERNEL);
        if (!q)
                return NULL;

        q->nentries = nentries;
        /* Callers pass a power-of-two nentries, so the mask turns a
         * free-running producer/consumer counter into a ring offset.
         */
        q->ring_mask = nentries - 1;

        /* __GFP_COMP keeps the multi-page ring as one compound page;
         * __GFP_NOWARN and __GFP_NORETRY make this user-sized
         * allocation fail fast and quietly under memory pressure.
         */
        gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN |
                    __GFP_COMP  | __GFP_NORETRY;
        size = xskq_get_ring_size(q, umem_queue);

        q->ring = (struct xdp_ring *)__get_free_pages(gfp_flags,
                                                      get_order(size));
        if (!q->ring) {
                kfree(q);
                return NULL;
        }

        return q;
}
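
/*
 * A sketch of the expected caller-side validation (modelled on
 * xsk_init_queue() in net/xdp/xsk.c; exact details may vary by
 * kernel version).  The power-of-two check is what makes the
 * ring_mask trick above safe:
 *
 *	if (entries == 0 || *queue || !is_power_of_2(entries))
 *		return -EINVAL;
 *
 *	q = xskq_create(entries, umem_queue);
 *	if (!q)
 *		return -ENOMEM;
 */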

void xskq_destroy(struct xsk_queue *q)
{
        if (!q)
                return;

        /* The ring was allocated with __GFP_COMP, so page_frag_free()
         * can resolve the compound head page and release the whole
         * multi-page block.
         */
        page_frag_free(q->ring);
        kfree(q);
}

struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
        struct xdp_umem_fq_reuse *newq;

        /* Check for overflow: roundup_pow_of_two() wraps for requests
         * above 2^31, in which case the rounded result compares lower
         * than the original value.
         */
        if (nentries > (u32)roundup_pow_of_two(nentries))
                return NULL;
        nentries = roundup_pow_of_two(nentries);

        newq = kvmalloc(struct_size(newq, handles, nentries), GFP_KERNEL);
        if (!newq)
                return NULL;
        /* Only the header needs clearing; handles[] entries are
         * written before they are ever read.
         */
        memset(newq, 0, offsetof(typeof(*newq), handles));

        newq->nentries = nentries;
        return newq;
}
EXPORT_SYMBOL_GPL(xsk_reuseq_prepare);

struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
                                          struct xdp_umem_fq_reuse *newq)
{
        struct xdp_umem_fq_reuse *oldq = umem->fq_reuse;

        if (!oldq) {
                umem->fq_reuse = newq;
                return NULL;
        }

        /* Hand newq straight back if it cannot hold the handles
         * currently stashed in oldq; the caller must free it.
         */
        if (newq->nentries < oldq->length)
                return newq;

        memcpy(newq->handles, oldq->handles,
               array_size(oldq->length, sizeof(u64)));
        newq->length = oldq->length;

        umem->fq_reuse = newq;
        return oldq;
}
EXPORT_SYMBOL_GPL(xsk_reuseq_swap);
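
/*
 * Illustrative driver-side pattern (a sketch modelled on the Intel
 * driver usage; ring_size here stands for the driver's RX ring
 * length).  Whatever xsk_reuseq_swap() returns is exactly the queue
 * the caller must dispose of: NULL when newq was installed into an
 * empty slot, newq itself when it was too small, or the replaced oldq
 * otherwise, so the swap and the free compose directly:
 *
 *	struct xdp_umem_fq_reuse *reuseq;
 *
 *	reuseq = xsk_reuseq_prepare(ring_size);
 *	if (!reuseq)
 *		return -ENOMEM;
 *
 *	xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
 */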

void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
        kvfree(rq);
}
EXPORT_SYMBOL_GPL(xsk_reuseq_free);

void xsk_reuseq_destroy(struct xdp_umem *umem)
{
        xsk_reuseq_free(umem->fq_reuse);
        umem->fq_reuse = NULL;
}