// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
	 BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP)

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,
					   GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
	    attr->map_flags & BPF_F_PRESERVE_ELEMS)
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}

static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool bypass_spec_v1 = bpf_bypass_spec_v1();
	u64 array_size, mask64;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (!bypass_spec_v1) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}
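
	/* For example, attr->max_entries == 5 gives fls_long(4) == 3, so
	 * mask64 becomes 0b111 == 7 and index_mask == 7; with Spectre v1
	 * mitigations active, max_entries is then rounded up to
	 * index_mask + 1 == 8 so speculative accesses stay inside the array.
	 */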

	array_size = sizeof(*array);
	if (percpu) {
		array_size += (u64) max_entries * sizeof(void *);
	} else {
		/* rely on vmalloc() to return page-aligned memory and
		 * ensure array->value is exactly page-aligned for
		 * mmap()'ed memory.
		 */
		if (attr->map_flags & BPF_F_MMAPABLE) {
			array_size = PAGE_ALIGN(array_size);
			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
		} else {
			array_size += (u64) max_entries * elem_size;
		}
	}

	/* allocate all map elements and zero-initialize them */
	if (attr->map_flags & BPF_F_MMAPABLE) {
		void *data;

		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
		if (!data)
			return ERR_PTR(-ENOMEM);
		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
			- offsetof(struct bpf_array, value);
	} else {
		array = bpf_map_area_alloc(array_size, numa_node);
	}
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.bypass_spec_v1 = bypass_spec_v1;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * (index & array->index_mask);
}

static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
				       u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -EOPNOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}

static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
				       u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -EOPNOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	if (map->map_flags & BPF_F_INNER_MAP)
		return -EOPNOTSUPP;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}
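
/* Copy the value of all possible CPUs for one element into a flat user
 * buffer: round_up(value_size, 8) bytes per possible CPU, e.g. 16 bytes
 * per CPU for value_size == 12.
 */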
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

static void check_and_free_timer_in_array(struct bpf_array *arr, void *val)
{
	if (unlikely(map_value_has_timer(&arr->map)))
		bpf_timer_cancel_and_free(val + arr->map.timer_off);
}

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(map)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	} else {
		val = array->value +
			array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
		check_and_free_timer_in_array(array, val);
	}
	return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}
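
/* For BPF_F_MMAPABLE arrays, array_map_alloc() placed the bpf_array header
 * just below the page-aligned value area, inside the first page of the
 * vmalloc'ed region; rounding down to a page boundary therefore recovers
 * the address returned by bpf_map_area_mmapable_alloc().
 */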
static void *array_map_vmalloc_addr(struct bpf_array *array)
{
	return (void *)round_down((unsigned long)array, PAGE_SIZE);
}

static void array_map_free_timers(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	if (likely(!map_value_has_timer(map)))
		return;

	for (i = 0; i < array->map.max_entries; i++)
		bpf_timer_cancel_and_free(array->value + array->elem_size * i +
					  map->timer_off);
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	if (array->map.map_flags & BPF_F_MMAPABLE)
		bpf_map_area_free(array_map_vmalloc_addr(array));
	else
		bpf_map_area_free(array);
}

static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}
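
/* mmap() of a BPF_F_MMAPABLE array exposes only the value area: pgoff
 * skips the pages that hold struct bpf_array itself, so a user offset of
 * zero maps the first element.
 */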
static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;

	if (!(map->map_flags & BPF_F_MMAPABLE))
		return -EINVAL;

	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
		return -EINVAL;

	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
				   vma->vm_pgoff + pgoff);
}
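
/* With BPF_F_INNER_MAP the inlined lookup is disabled (see
 * array_map_gen_lookup() above), so an inner map may have a different
 * max_entries than the map-in-map template; without that flag the sizes
 * must match because the generated code hard-codes them.
 */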
static bool array_map_meta_equal(const struct bpf_map *meta0,
				 const struct bpf_map *meta1)
{
	if (!bpf_map_meta_equal(meta0, meta1))
		return false;
	return meta0->map_flags & BPF_F_INNER_MAP ? true :
	       meta0->max_entries == meta1->max_entries;
}

struct bpf_iter_seq_array_map_info {
	struct bpf_map *map;
	void *percpu_value_buf;
	u32 index;
};

static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	if (info->index >= map->max_entries)
		return NULL;

	if (*pos == 0)
		++*pos;
	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array->value + array->elem_size * index;
}

static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	++*pos;
	++info->index;
	if (info->index >= map->max_entries)
		return NULL;

	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array->value + array->elem_size * index;
}

static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_iter__bpf_map_elem ctx = {};
	struct bpf_map *map = info->map;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int off = 0, cpu = 0;
	void __percpu **pptr;
	u32 size;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, v == NULL);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (v) {
		ctx.key = &info->index;

		if (!info->percpu_value_buf) {
			ctx.value = v;
		} else {
			pptr = v;
			size = round_up(map->value_size, 8);
			for_each_possible_cpu(cpu) {
				bpf_long_memcpy(info->percpu_value_buf + off,
						per_cpu_ptr(pptr, cpu),
						size);
				off += size;
			}
			ctx.value = info->percpu_value_buf;
		}
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_array_map_seq_show(seq, v);
}

static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__bpf_array_map_seq_show(seq, NULL);
}

static int bpf_iter_init_array_map(void *priv_data,
				   struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
	struct bpf_map *map = aux->map;
	void *value_buf;
	u32 buf_size;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		buf_size = round_up(map->value_size, 8) * num_possible_cpus();
		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
		if (!value_buf)
			return -ENOMEM;

		seq_info->percpu_value_buf = value_buf;
	}

	seq_info->map = map;
	return 0;
}

static void bpf_iter_fini_array_map(void *priv_data)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;

	kfree(seq_info->percpu_value_buf);
}

static const struct seq_operations bpf_array_map_seq_ops = {
	.start	= bpf_array_map_seq_start,
	.next	= bpf_array_map_seq_next,
	.stop	= bpf_array_map_seq_stop,
	.show	= bpf_array_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops		= &bpf_array_map_seq_ops,
	.init_seq_private	= bpf_iter_init_array_map,
	.fini_seq_private	= bpf_iter_fini_array_map,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_array_map_info),
};
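
/* Walk every element and invoke callback_fn(map, &key, value, callback_ctx);
 * a non-zero return from the callback stops the walk. The number of elements
 * visited is returned to the bpf_for_each_map_elem() helper.
 */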
static int bpf_for_each_array_elem(struct bpf_map *map, void *callback_fn,
				   void *callback_ctx, u64 flags)
{
	u32 i, key, num_elems = 0;
	struct bpf_array *array;
	bool is_percpu;
	u64 ret = 0;
	void *val;

	if (flags != 0)
		return -EINVAL;

	is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	array = container_of(map, struct bpf_array, map);
	if (is_percpu)
		migrate_disable();
	for (i = 0; i < map->max_entries; i++) {
		if (is_percpu)
			val = this_cpu_ptr(array->pptrs[i]);
		else
			val = array->value + array->elem_size * i;
		num_elems++;
		key = i;
		ret = BPF_CAST_CALL(callback_fn)((u64)(long)map,
					(u64)(long)&key, (u64)(long)val,
					(u64)(long)callback_ctx, 0);
		/* return value: 0 - continue, 1 - stop and return */
		if (ret)
			break;
	}

	if (is_percpu)
		migrate_enable();
	return num_elems;
}

static int array_map_btf_id;
const struct bpf_map_ops array_map_ops = {
	.map_meta_equal = array_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_release_uref = array_map_free_timers,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_mmap = array_map_mmap,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_btf_name = "bpf_array",
	.map_btf_id = &array_map_btf_id,
	.iter_seq_info = &iter_seq_info,
};

static int percpu_array_map_btf_id;
const struct bpf_map_ops percpu_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_btf_name = "bpf_array",
	.map_btf_id = &percpu_array_map_btf_id,
	.iter_seq_info = &iter_seq_info,
};

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, new_ptr);
		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, new_ptr);
	}

	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);
	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, NULL);
		map->ops->map_poke_run(map, index, old_ptr, NULL);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, NULL);
	}

	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	void **elem, *ptr;
	u32 prog_id;

	rcu_read_lock();

	elem = array_map_lookup_elem(map, key);
	if (elem) {
		ptr = READ_ONCE(*elem);
		if (ptr) {
			seq_printf(m, "%u: ", *(u32 *)key);
			prog_id = prog_fd_array_sys_lookup_elem(ptr);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  &prog_id, m);
			seq_puts(m, "\n");
		}
	}

	rcu_read_unlock();
}
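
/* Tail call poking: programs that use this prog array as a tail-call target
 * register themselves via map_poke_track(), and every update or deletion of
 * a map slot is propagated into their JITed images through map_poke_run().
 */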
struct prog_poke_elem {
	struct list_head list;
	struct bpf_prog_aux *aux;
};

static int prog_array_map_poke_track(struct bpf_map *map,
				     struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;
	int ret = 0;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry(elem, &aux->poke_progs, list) {
		if (elem->aux == prog_aux)
			goto out;
	}

	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
	if (!elem) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&elem->list);
	/* We must track the program's aux info at this point in time
	 * since the program pointer itself may not be stable yet, see
	 * also comment in prog_array_map_poke_run().
	 */
	elem->aux = prog_aux;

	list_add_tail(&elem->list, &aux->poke_progs);
out:
	mutex_unlock(&aux->poke_mutex);
	return ret;
}

static void prog_array_map_poke_untrack(struct bpf_map *map,
					struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		if (elem->aux == prog_aux) {
			list_del_init(&elem->list);
			kfree(elem);
			break;
		}
	}
	mutex_unlock(&aux->poke_mutex);
}

static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
				    struct bpf_prog *old,
				    struct bpf_prog *new)
{
	u8 *old_addr, *new_addr, *old_bypass_addr;
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));

	list_for_each_entry(elem, &aux->poke_progs, list) {
		struct bpf_jit_poke_descriptor *poke;
		int i, ret;

		for (i = 0; i < elem->aux->size_poke_tab; i++) {
			poke = &elem->aux->poke_tab[i];

			/* Few things to be aware of:
			 *
			 * 1) We can only ever access aux in this context, but
			 *    not aux->prog since it might not be stable yet and
			 *    there could be danger of use after free otherwise.
			 * 2) Initially when we start tracking aux, the program
			 *    is not JITed yet and also does not have a kallsyms
			 *    entry. We skip these as poke->tailcall_target_stable
			 *    is not active yet. The JIT will do the final fixup
			 *    before setting it stable. The various
			 *    poke->tailcall_target_stable are successively
			 *    activated, so tail call updates can arrive from here
			 *    while JIT is still finishing its final fixup for
			 *    non-activated poke entries.
			 * 3) On program teardown, the program's kallsym entry gets
			 *    removed out of RCU callback, but we can only untrack
			 *    from sleepable context, therefore bpf_arch_text_poke()
			 *    might not see that this is in BPF text section and
			 *    bails out with -EINVAL. As these are unreachable since
			 *    RCU grace period already passed, we simply skip them.
			 * 4) Also programs reaching refcount of zero while patching
			 *    is in progress is okay since we're protected under
			 *    poke_mutex and untrack the programs before the JIT
			 *    buffer is freed. When we're still in the middle of
			 *    patching and suddenly kallsyms entry of the program
			 *    gets evicted, we just skip the rest which is fine due
			 *    to point 3).
			 * 5) Any other error happening below from bpf_arch_text_poke()
			 *    is an unexpected bug.
			 */
			if (!READ_ONCE(poke->tailcall_target_stable))
				continue;
			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
				continue;
			if (poke->tail_call.map != map ||
			    poke->tail_call.key != key)
				continue;

			old_bypass_addr = old ? NULL : poke->bypass_addr;
			old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
			new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;

			if (new) {
				ret = bpf_arch_text_poke(poke->tailcall_target,
							 BPF_MOD_JUMP,
							 old_addr, new_addr);
				BUG_ON(ret < 0 && ret != -EINVAL);
				if (!old) {
					ret = bpf_arch_text_poke(poke->tailcall_bypass,
								 BPF_MOD_JUMP,
								 poke->bypass_addr,
								 NULL);
					BUG_ON(ret < 0 && ret != -EINVAL);
				}
			} else {
				ret = bpf_arch_text_poke(poke->tailcall_bypass,
							 BPF_MOD_JUMP,
							 old_bypass_addr,
							 poke->bypass_addr);
				BUG_ON(ret < 0 && ret != -EINVAL);
				/* let other CPUs finish the execution of program
				 * so that it will not be possible to expose them
				 * to invalid nop, stack unwind, nop state
				 */
				if (!ret)
					synchronize_rcu();
				ret = bpf_arch_text_poke(poke->tailcall_target,
							 BPF_MOD_JUMP,
							 old_addr, NULL);
				BUG_ON(ret < 0 && ret != -EINVAL);
			}
		}
	}
}

static void prog_array_map_clear_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_array_aux,
					   work)->map;
	bpf_fd_array_map_clear(map);
	bpf_map_put(map);
}

static void prog_array_map_clear(struct bpf_map *map)
{
	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
						 map)->aux;
	bpf_map_inc(map);
	schedule_work(&aux->work);
}

static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
{
	struct bpf_array_aux *aux;
	struct bpf_map *map;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT);
	if (!aux)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
	INIT_LIST_HEAD(&aux->poke_progs);
	mutex_init(&aux->poke_mutex);

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		kfree(aux);
		return map;
	}

	container_of(map, struct bpf_array, map)->aux = aux;
	aux->map = map;

	return map;
}

static void prog_array_map_free(struct bpf_map *map)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		list_del_init(&elem->list);
		kfree(elem);
	}
	kfree(aux);
	fd_array_map_free(map);
}

/* prog_array->aux->{type,jited} is a runtime binding.
 * Doing static check alone in the verifier is not enough.
 * Thus, prog_array_map cannot be used as an inner_map
 * and map_meta_equal is not implemented.
 */
static int prog_array_map_btf_id;
const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = prog_array_map_alloc,
	.map_free = prog_array_map_free,
	.map_poke_track = prog_array_map_poke_track,
	.map_poke_untrack = prog_array_map_poke_untrack,
	.map_poke_run = prog_array_map_poke_run,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
	.map_release_uref = prog_array_map_clear,
	.map_seq_show_elem = prog_array_map_seq_show_elem,
	.map_btf_name = "bpf_array",
	.map_btf_id = &prog_array_map_btf_id,
};
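
/* A perf event array slot holds a bpf_event_entry which pins the perf event
 * file; the reference is dropped (fput) only after an RCU grace period so
 * that concurrent readers of the slot stay safe.
 */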
static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
		return;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

static void perf_event_fd_array_map_free(struct bpf_map *map)
{
	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
		bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static int perf_event_array_map_btf_id;
const struct bpf_map_ops perf_event_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = perf_event_fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_array",
	.map_btf_id = &perf_event_array_map_btf_id,
};

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees cgrp after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static int cgroup_array_map_btf_id;
const struct bpf_map_ops cgroup_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_array",
	.map_btf_id = &cgroup_array_map_btf_id,
};
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}
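
/* Same inline lookup as array_map_gen_lookup(), plus one extra
 * BPF_LDX_MEM(BPF_DW, ...) to dereference the stored inner map pointer,
 * which is what array_of_map_lookup_elem() does with READ_ONCE() above.
 */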
static int array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}

static int array_of_maps_map_btf_id;
const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_array",
	.map_btf_id = &array_of_maps_map_btf_id,
};