// SPDX-License-Identifier: GPL-2.0
/*
 * queue_stack_maps.c: BPF queue and stack maps
 *
 * Copyright (c) 2018 Politecnico di Torino
 */
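/*
 * A queue map (BPF_MAP_TYPE_QUEUE) gives FIFO ordering and a stack map
 * (BPF_MAP_TYPE_STACK) gives LIFO ordering over the same circular buffer.
 * Elements are value-only (key_size must be 0): BPF programs use the
 * bpf_map_push_elem(), bpf_map_pop_elem() and bpf_map_peek_elem() helpers,
 * while user space reaches the same operations through the
 * BPF_MAP_UPDATE_ELEM, BPF_MAP_LOOKUP_AND_DELETE_ELEM and
 * BPF_MAP_LOOKUP_ELEM commands with a NULL key.
 */
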
#include <linux/bpf.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include "percpu_freelist.h"

#define QUEUE_STACK_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)

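/*
 * Both map types share one backing store: a circular buffer of
 * max_entries + 1 fixed-size slots. head indexes the next free slot and
 * tail the oldest live element; the extra slot lets head == tail mean
 * "empty" while "full" is head + 1 == tail (modulo size).
 */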
struct bpf_queue_stack {
	struct bpf_map map;
	raw_spinlock_t lock;
	u32 head, tail;
	u32 size; /* max_entries + 1 */

	char elements[] __aligned(8);
};

static struct bpf_queue_stack *bpf_queue_stack(struct bpf_map *map)
{
	return container_of(map, struct bpf_queue_stack, map);
}

static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs)
{
	return qs->head == qs->tail;
}

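/* One slot always stays unused, so a full map (head + 1 == tail, modulo
 * size) remains distinguishable from an empty one (head == tail).
 */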
static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
{
	u32 head = qs->head + 1;

	if (unlikely(head >= qs->size))
		head = 0;

	return head == qs->tail;
}

/* Called from syscall */
static int queue_stack_map_alloc_check(union bpf_attr *attr)
{
	if (!bpf_capable())
		return -EPERM;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 0 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags))
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}

static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_queue_stack *qs;
	u64 size, queue_size;

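	/* One extra slot keeps "full" distinct from "empty"; the u64 cast
	 * stops max_entries + 1 from wrapping when max_entries == U32_MAX.
	 */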
	size = (u64) attr->max_entries + 1;
	queue_size = sizeof(*qs) + size * attr->value_size;

	qs = bpf_map_area_alloc(queue_size, numa_node);
	if (!qs)
		return ERR_PTR(-ENOMEM);

	memset(qs, 0, sizeof(*qs));

	bpf_map_init_from_attr(&qs->map, attr);

	qs->size = size;

	raw_spin_lock_init(&qs->lock);

	return &qs->map;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void queue_stack_map_free(struct bpf_map *map)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);

	bpf_map_area_free(qs);
}

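/* Common FIFO path: peek or pop the oldest element, the one at tail. */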
static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	ptr = &qs->elements[qs->tail * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete) {
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

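/* Common LIFO path: peek or pop the newest element, just below head. */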
static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;
	u32 index;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

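	/* head is u32, so with head == 0 this wraps to U32_MAX; the bounds
	 * check below then folds index back to the last slot of the ring.
	 */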
	index = qs->head - 1;
	if (unlikely(index >= qs->size))
		index = qs->size - 1;

	ptr = &qs->elements[index * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete)
		qs->head = index;

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

/* Called from syscall or from eBPF program */
static int queue_map_peek_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int stack_map_peek_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int queue_map_pop_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, true);
}

/* Called from syscall or from eBPF program */
static int stack_map_pop_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, true);
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_push_elem(struct bpf_map *map, void *value,
				     u64 flags)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long irq_flags;
	int err = 0;
	void *dst;

	/* BPF_EXIST is used to force making room for a new element in case the
	 * map is full
	 */
	bool replace = (flags & BPF_EXIST);

	/* Check supported flags for queue and stack maps */
	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
		return -EINVAL;

	raw_spin_lock_irqsave(&qs->lock, irq_flags);

	if (queue_stack_map_is_full(qs)) {
		if (!replace) {
			err = -E2BIG;
			goto out;
		}
		/* advance tail pointer to overwrite oldest element */
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

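	/* head always indexes the next free slot: store the new element
	 * there, then advance head with wrap-around.
	 */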
	dst = &qs->elements[qs->head * qs->map.value_size];
	memcpy(dst, value, qs->map.value_size);

	if (unlikely(++qs->head >= qs->size))
		qs->head = 0;

out:
	raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
	return err;
}

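/* Queue and stack maps are value-only (key_size == 0), so the generic
 * per-key operations below are rejected; user space and BPF programs
 * reach these maps through the push/pop/peek callbacks instead.
 */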
/* Called from syscall or from eBPF program */
static void *queue_stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 flags)
{
	return -EINVAL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called from syscall */
static int queue_stack_map_get_next_key(struct bpf_map *map, void *key,
					void *next_key)
{
	return -EINVAL;
}

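/* The two ops tables differ only in their pop and peek callbacks, which
 * select FIFO vs. LIFO ordering over the shared implementation.
 */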
static int queue_map_btf_id;
const struct bpf_map_ops queue_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = queue_map_pop_elem,
	.map_peek_elem = queue_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
	.map_btf_name = "bpf_queue_stack",
	.map_btf_id = &queue_map_btf_id,
};

static int stack_map_btf_id;
const struct bpf_map_ops stack_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = stack_map_pop_elem,
	.map_peek_elem = stack_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
	.map_btf_name = "bpf_queue_stack",
	.map_btf_id = &stack_map_btf_id,
};