#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/irq_work.h>
#include <linux/slab.h>
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <uapi/linux/btf.h>

#define RINGBUF_CREATE_FLAG_MASK (BPF_F_NUMA_NODE)

/* non-mmap()'able part of bpf_ringbuf (everything up to consumer page) */
#define RINGBUF_PGOFF \
	(offsetof(struct bpf_ringbuf, consumer_pos) >> PAGE_SHIFT)
/* consumer page and producer page */
#define RINGBUF_POS_PAGES 2

#define RINGBUF_MAX_RECORD_SZ (UINT_MAX/4)

/* Maximum size of ring buffer area is limited by 32-bit page offset within
 * record header, counted in pages. Reserve 8 bits for extensibility, and
 * take into account a few extra pages for consumer/producer pages and
 * non-mmap()'able parts. This gives a 64GB limit, which seems plenty for a
 * single ring buffer.
 */
#define RINGBUF_MAX_DATA_SZ \
	(((1ULL << 24) - RINGBUF_POS_PAGES - RINGBUF_PGOFF) * PAGE_SIZE)

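/* A worked instance of the limit above (illustrative sketch, not part of
 * the original file; assumes the common 4KB PAGE_SIZE): hdr->pg_off is
 * 32 bits with 8 reserved, leaving 2^24 addressable pages, and
 * 2^24 * 4096 bytes == 64GB.
 */
#if 0
_Static_assert((1ULL << 24) * 4096 == 64ULL << 30,
	       "2^24 pages of 4KB each == 64GB");
#endif
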
struct bpf_ringbuf {
	wait_queue_head_t waitq;
	struct irq_work work;
	u64 mask;
	struct page **pages;
	int nr_pages;
	spinlock_t spinlock ____cacheline_aligned_in_smp;
	/* Consumer and producer counters are put into separate pages to allow
	 * mapping consumer page as r/w, but restrict producer page to r/o.
	 * This protects producer position from being modified by user-space
	 * application and ruining in-kernel position tracking.
	 */
	unsigned long consumer_pos __aligned(PAGE_SIZE);
	unsigned long producer_pos __aligned(PAGE_SIZE);
	char data[] __aligned(PAGE_SIZE);
};

struct bpf_ringbuf_map {
	struct bpf_map map;
	struct bpf_map_memory memory;
	struct bpf_ringbuf *rb;
};

/* 8-byte ring buffer record header structure */
struct bpf_ringbuf_hdr {
	u32 len;
	u32 pg_off;
};

static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node)
{
	const gfp_t flags = GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL |
			    __GFP_NOWARN | __GFP_ZERO;
	int nr_meta_pages = RINGBUF_PGOFF + RINGBUF_POS_PAGES;
	int nr_data_pages = data_sz >> PAGE_SHIFT;
	int nr_pages = nr_meta_pages + nr_data_pages;
	struct page **pages, *page;
	struct bpf_ringbuf *rb;
	size_t array_size;
	int i;

	/* Each data page is mapped twice to allow "virtual"
	 * continuous read of samples wrapping around the end of ring
	 * buffer area:
	 * ------------------------------------------------------
	 * | meta pages |  real data pages  |  same data pages  |
	 * ------------------------------------------------------
	 * |            | 1 2 3 4 5 6 7 8 9 | 1 2 3 4 5 6 7 8 9 |
	 * ------------------------------------------------------
	 * |            | TA             DA | TA             DA |
	 * ------------------------------------------------------
	 *                               ^^^^^^^
	 *                                  |
	 * Here, no need to worry about special handling of wrapped-around
	 * data due to double-mapped data pages. This works both in kernel and
	 * when mmap()'ed in user-space, simplifying both kernel and
	 * user-space implementations significantly.
	 */
	array_size = (nr_meta_pages + 2 * nr_data_pages) * sizeof(*pages);
	pages = bpf_map_area_alloc(array_size, numa_node);
	if (!pages)
		return NULL;

	for (i = 0; i < nr_pages; i++) {
		page = alloc_pages_node(numa_node, flags, 0);
		if (!page) {
			nr_pages = i;
			goto err_free_pages;
		}
		pages[i] = page;
		if (i >= nr_meta_pages)
			pages[nr_data_pages + i] = page;
	}

	rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages,
		  VM_ALLOC | VM_USERMAP, PAGE_KERNEL);
	if (rb) {
		rb->pages = pages;
		rb->nr_pages = nr_pages;
		return rb;
	}

err_free_pages:
	for (i = 0; i < nr_pages; i++)
		__free_page(pages[i]);
	kvfree(pages);
	return NULL;
}

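/* Why the double mapping above matters: a record that wraps around the end
 * of the data area can still be copied with a single memcpy(), because the
 * second mapping of the same physical pages makes the region virtually
 * contiguous. Illustrative sketch only, not part of the original file;
 * 'off' and 'len' are assumed to describe a committed record.
 */
#if 0
static void example_read_wrapped(struct bpf_ringbuf *rb, void *dst,
				 unsigned long off, size_t len)
{
	/* no wrap-around handling needed, even if
	 * (off & rb->mask) + len > rb->mask + 1
	 */
	memcpy(dst, rb->data + (off & rb->mask), len);
}
#endif
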
static void bpf_ringbuf_notify(struct irq_work *work)
{
	struct bpf_ringbuf *rb = container_of(work, struct bpf_ringbuf, work);

	wake_up_all(&rb->waitq);
}

static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
{
	struct bpf_ringbuf *rb;

	rb = bpf_ringbuf_area_alloc(data_sz, numa_node);
	if (!rb)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&rb->spinlock);
	init_waitqueue_head(&rb->waitq);
	init_irq_work(&rb->work, bpf_ringbuf_notify);

	rb->mask = data_sz - 1;
	rb->consumer_pos = 0;
	rb->producer_pos = 0;

	return rb;
}

static struct bpf_map *ringbuf_map_alloc(union bpf_attr *attr)
{
	struct bpf_ringbuf_map *rb_map;
	u64 cost;
	int err;

	if (attr->map_flags & ~RINGBUF_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	if (attr->key_size || attr->value_size ||
	    !is_power_of_2(attr->max_entries) ||
	    !PAGE_ALIGNED(attr->max_entries))
		return ERR_PTR(-EINVAL);

#ifdef CONFIG_64BIT
	/* on 32-bit arch, it's impossible to overflow record's hdr->pgoff */
	if (attr->max_entries > RINGBUF_MAX_DATA_SZ)
		return ERR_PTR(-E2BIG);
#endif

	rb_map = kzalloc(sizeof(*rb_map), GFP_USER | __GFP_ACCOUNT);
	if (!rb_map)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&rb_map->map, attr);

	cost = sizeof(struct bpf_ringbuf_map) +
	       sizeof(struct bpf_ringbuf) +
	       attr->max_entries;
	err = bpf_map_charge_init(&rb_map->map.memory, cost);
	if (err)
		goto err_free_map;

	rb_map->rb = bpf_ringbuf_alloc(attr->max_entries, rb_map->map.numa_node);
	if (IS_ERR(rb_map->rb)) {
		err = PTR_ERR(rb_map->rb);
		goto err_uncharge;
	}

	return &rb_map->map;

err_uncharge:
	bpf_map_charge_finish(&rb_map->map.memory);
err_free_map:
	kfree(rb_map);
	return ERR_PTR(err);
}

static void bpf_ringbuf_free(struct bpf_ringbuf *rb)
{
	/* copy pages pointer and nr_pages to local variables, as we are going
	 * to unmap rb itself with vunmap() below
	 */
	struct page **pages = rb->pages;
	int i, nr_pages = rb->nr_pages;

	vunmap(rb);
	for (i = 0; i < nr_pages; i++)
		__free_page(pages[i]);
	kvfree(pages);
}

static void ringbuf_map_free(struct bpf_map *map)
{
	struct bpf_ringbuf_map *rb_map;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	bpf_ringbuf_free(rb_map->rb);
	kfree(rb_map);
}

static void *ringbuf_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-ENOTSUPP);
}

static int ringbuf_map_update_elem(struct bpf_map *map, void *key, void *value,
				   u64 flags)
{
	return -ENOTSUPP;
}

static int ringbuf_map_delete_elem(struct bpf_map *map, void *key)
{
	return -ENOTSUPP;
}

static int ringbuf_map_get_next_key(struct bpf_map *map, void *key,
				    void *next_key)
{
	return -ENOTSUPP;
}

static size_t bpf_ringbuf_mmap_page_cnt(const struct bpf_ringbuf *rb)
{
	size_t data_pages = (rb->mask + 1) >> PAGE_SHIFT;

	/* consumer page + producer page + 2 x data pages */
	return RINGBUF_POS_PAGES + 2 * data_pages;
}

static int ringbuf_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_ringbuf_map *rb_map;
	size_t mmap_sz;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	mmap_sz = bpf_ringbuf_mmap_page_cnt(rb_map->rb) << PAGE_SHIFT;

	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) > mmap_sz)
		return -EINVAL;

	return remap_vmalloc_range(vma, rb_map->rb,
				   vma->vm_pgoff + RINGBUF_PGOFF);
}

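/* Sketch of the matching user-space mapping (illustrative only, not part of
 * the original file; this is what libbpf does in essence). 'map_fd' is an
 * assumed BPF_MAP_TYPE_RINGBUF map fd, 'page_size' the system page size and
 * 'data_sz' the ring size. Per the protection split described in struct
 * bpf_ringbuf, the consumer page is mapped read-write, while the producer
 * page and the double-mapped data pages are mapped read-only.
 */
#if 0
	/* page 0 of the mappable area: consumer_pos, writable */
	cons = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    map_fd, 0);
	/* page 1 onward: producer_pos plus 2 * data_sz of data, read-only */
	prod = mmap(NULL, page_size + 2 * data_sz, PROT_READ, MAP_SHARED,
		    map_fd, page_size);
#endif
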
static unsigned long ringbuf_avail_data_sz(struct bpf_ringbuf *rb)
{
	unsigned long cons_pos, prod_pos;

	cons_pos = smp_load_acquire(&rb->consumer_pos);
	prod_pos = smp_load_acquire(&rb->producer_pos);
	return prod_pos - cons_pos;
}

static __poll_t ringbuf_map_poll(struct bpf_map *map, struct file *filp,
				 struct poll_table_struct *pts)
{
	struct bpf_ringbuf_map *rb_map;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	poll_wait(filp, &rb_map->rb->waitq, pts);

	if (ringbuf_avail_data_sz(rb_map->rb))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}

static int ringbuf_map_btf_id;
const struct bpf_map_ops ringbuf_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = ringbuf_map_alloc,
	.map_free = ringbuf_map_free,
	.map_mmap = ringbuf_map_mmap,
	.map_poll = ringbuf_map_poll,
	.map_lookup_elem = ringbuf_map_lookup_elem,
	.map_update_elem = ringbuf_map_update_elem,
	.map_delete_elem = ringbuf_map_delete_elem,
	.map_get_next_key = ringbuf_map_get_next_key,
	.map_btf_name = "bpf_ringbuf_map",
	.map_btf_id = &ringbuf_map_btf_id,
};

/* Given a pointer to ring buffer record metadata and struct bpf_ringbuf
 * itself, calculate the offset from record metadata to the ring buffer in
 * pages, rounded down. This page offset is stored as part of record metadata
 * and allows restoring struct bpf_ringbuf * from a record pointer. It is
 * stored at offset 4 of the record metadata header.
 */
static size_t bpf_ringbuf_rec_pg_off(struct bpf_ringbuf *rb,
				     struct bpf_ringbuf_hdr *hdr)
{
	return ((void *)hdr - (void *)rb) >> PAGE_SHIFT;
}

/* Given a pointer to a ring buffer record header, restore the pointer to
 * struct bpf_ringbuf itself by using the page offset stored at offset 4
 */
static struct bpf_ringbuf *
bpf_ringbuf_restore_from_rec(struct bpf_ringbuf_hdr *hdr)
{
	unsigned long addr = (unsigned long)(void *)hdr;
	unsigned long off = (unsigned long)hdr->pg_off << PAGE_SHIFT;

	return (void *)((addr & PAGE_MASK) - off);
}

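/* Worked round trip of the two helpers above (illustrative sketch with made-up
 * addresses, assuming 4KB pages): rounding pg_off down is safe because rb is
 * page-aligned, so the header's sub-page remainder survives in
 * (addr & PAGE_MASK).
 */
#if 0
	/* forward: rb at 0x7f0000000000, hdr at rb + 0x5008 */
	pg_off = 0x5008UL >> PAGE_SHIFT;	/* == 5, rounded down */
	/* inverse: page containing hdr, minus pg_off pages, yields rb */
	rb = (void *)((0x7f0000005008UL & PAGE_MASK) - (5UL << PAGE_SHIFT));
	/* == 0x7f0000000000 */
#endif
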
static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
{
	unsigned long cons_pos, prod_pos, new_prod_pos, flags;
	u32 len, pg_off;
	struct bpf_ringbuf_hdr *hdr;

	if (unlikely(size > RINGBUF_MAX_RECORD_SZ))
		return NULL;

	len = round_up(size + BPF_RINGBUF_HDR_SZ, 8);
	cons_pos = smp_load_acquire(&rb->consumer_pos);

	if (in_nmi()) {
		if (!spin_trylock_irqsave(&rb->spinlock, flags))
			return NULL;
	} else {
		spin_lock_irqsave(&rb->spinlock, flags);
	}

	prod_pos = rb->producer_pos;
	new_prod_pos = prod_pos + len;

	/* check for out of ringbuf space by ensuring producer position
	 * doesn't advance more than (ringbuf_size - 1) ahead
	 */
	if (new_prod_pos - cons_pos > rb->mask) {
		spin_unlock_irqrestore(&rb->spinlock, flags);
		return NULL;
	}

	hdr = (void *)rb->data + (prod_pos & rb->mask);
	pg_off = bpf_ringbuf_rec_pg_off(rb, hdr);
	hdr->len = size | BPF_RINGBUF_BUSY_BIT;
	hdr->pg_off = pg_off;

	/* pairs with consumer's smp_load_acquire() */
	smp_store_release(&rb->producer_pos, new_prod_pos);

	spin_unlock_irqrestore(&rb->spinlock, flags);

	return (void *)hdr + BPF_RINGBUF_HDR_SZ;
}

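/* Sketch of the matching user-space consumer loop (illustrative only, not
 * part of the original file; libbpf's ring_buffer implements the real
 * thing). Assumes 'cons_pos'/'prod_pos'/'data' point into the mmap()'ed
 * pages, mask == data_sz - 1, process() is a hypothetical callback, and
 * smp_load_acquire()/smp_store_release() stand in for user-space
 * acquire/release atomics.
 */
#if 0
	while (cons < smp_load_acquire(prod_pos)) {
		__u32 len = smp_load_acquire((__u32 *)(data + (cons & mask)));

		if (len & BPF_RINGBUF_BUSY_BIT)
			break;	/* producer hasn't committed this record yet */
		if (!(len & BPF_RINGBUF_DISCARD_BIT))
			process(data + (cons & mask) + BPF_RINGBUF_HDR_SZ);
		len &= ~BPF_RINGBUF_DISCARD_BIT;	/* keep size bits only */
		cons += round_up(len + BPF_RINGBUF_HDR_SZ, 8);
		/* hands space back; pairs with the acquire of consumer_pos
		 * in __bpf_ringbuf_reserve()
		 */
		smp_store_release(cons_pos, cons);
	}
#endif
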
BPF_CALL_3(bpf_ringbuf_reserve, struct bpf_map *, map, u64, size, u64, flags)
{
	struct bpf_ringbuf_map *rb_map;

	if (unlikely(flags))
		return 0;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	return (unsigned long)__bpf_ringbuf_reserve(rb_map->rb, size);
}

const struct bpf_func_proto bpf_ringbuf_reserve_proto = {
	.func		= bpf_ringbuf_reserve,
	.ret_type	= RET_PTR_TO_ALLOC_MEM_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_CONST_ALLOC_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static void bpf_ringbuf_commit(void *sample, u64 flags, bool discard)
{
	unsigned long rec_pos, cons_pos;
	struct bpf_ringbuf_hdr *hdr;
	struct bpf_ringbuf *rb;
	u32 new_len;

	hdr = sample - BPF_RINGBUF_HDR_SZ;
	rb = bpf_ringbuf_restore_from_rec(hdr);
	new_len = hdr->len ^ BPF_RINGBUF_BUSY_BIT;
	if (discard)
		new_len |= BPF_RINGBUF_DISCARD_BIT;

	/* update record header with correct final size prefix */
	xchg(&hdr->len, new_len);

	/* if consumer caught up and is waiting for our record, notify about
	 * new data availability
	 */
	rec_pos = (void *)hdr - (void *)rb->data;
	cons_pos = smp_load_acquire(&rb->consumer_pos) & rb->mask;

	if (flags & BPF_RB_FORCE_WAKEUP)
		irq_work_queue(&rb->work);
	else if (cons_pos == rec_pos && !(flags & BPF_RB_NO_WAKEUP))
		irq_work_queue(&rb->work);
}

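/* Sketch of how a BPF program drives the reserve/commit cycle (illustrative
 * only; 'ringbuf' is an assumed BPF_MAP_TYPE_RINGBUF map defined in the
 * program's .maps section, and 'struct event' a hypothetical sample type).
 */
#if 0
SEC("tp/sched/sched_switch")
int example_prog(void *ctx)
{
	struct event *e;

	e = bpf_ringbuf_reserve(&ringbuf, sizeof(*e), 0);
	if (!e)
		return 0;	/* ring is full: reservation failed */

	e->pid = bpf_get_current_pid_tgid() >> 32;
	if (e->pid == 0)
		bpf_ringbuf_discard(e, 0);	/* sets DISCARD bit */
	else
		bpf_ringbuf_submit(e, BPF_RB_NO_WAKEUP); /* skip wakeup */
	return 0;
}
#endif
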
BPF_CALL_2(bpf_ringbuf_submit, void *, sample, u64, flags)
{
	bpf_ringbuf_commit(sample, flags, false /* discard */);
	return 0;
}

const struct bpf_func_proto bpf_ringbuf_submit_proto = {
	.func		= bpf_ringbuf_submit,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_ALLOC_MEM,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_ringbuf_discard, void *, sample, u64, flags)
{
	bpf_ringbuf_commit(sample, flags, true /* discard */);
	return 0;
}

const struct bpf_func_proto bpf_ringbuf_discard_proto = {
	.func		= bpf_ringbuf_discard,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_ALLOC_MEM,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_ringbuf_output, struct bpf_map *, map, void *, data, u64, size,
	   u64, flags)
{
	struct bpf_ringbuf_map *rb_map;
	void *rec;

	if (unlikely(flags & ~(BPF_RB_NO_WAKEUP | BPF_RB_FORCE_WAKEUP)))
		return -EINVAL;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	rec = __bpf_ringbuf_reserve(rb_map->rb, size);
	if (!rec)
		return -EAGAIN;

	memcpy(rec, data, size);
	bpf_ringbuf_commit(rec, flags, false /* discard */);
	return 0;
}

const struct bpf_func_proto bpf_ringbuf_output_proto = {
	.func		= bpf_ringbuf_output,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_ringbuf_query, struct bpf_map *, map, u64, flags)
{
	struct bpf_ringbuf *rb;

	rb = container_of(map, struct bpf_ringbuf_map, map)->rb;

	switch (flags) {
	case BPF_RB_AVAIL_DATA:
		return ringbuf_avail_data_sz(rb);
	case BPF_RB_RING_SIZE:
		return rb->mask + 1;
	case BPF_RB_CONS_POS:
		return smp_load_acquire(&rb->consumer_pos);
	case BPF_RB_PROD_POS:
		return smp_load_acquire(&rb->producer_pos);
	default:
		return 0;
	}
}

const struct bpf_func_proto bpf_ringbuf_query_proto = {
	.func		= bpf_ringbuf_query,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

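/* Sketch of using bpf_ringbuf_query() from a BPF program for simple
 * backpressure (illustrative only; 'ringbuf' is an assumed ring buffer map).
 * Both values are inherently momentary snapshots, per the flag semantics
 * above.
 */
#if 0
	__u64 avail = bpf_ringbuf_query(&ringbuf, BPF_RB_AVAIL_DATA);
	__u64 size = bpf_ringbuf_query(&ringbuf, BPF_RB_RING_SIZE);

	if (avail > size / 2)
		return 0;	/* over half full: shed low-priority samples */
#endif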