// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>

#include "map_in_map.h"
#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
	 BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP)
static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}
/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
	    attr->map_flags & BPF_F_PRESERVE_ELEMS)
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}
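/* Illustrative sketch (not part of this file): attributes that satisfy the
 * checks above when creating a plain array map from user space via the
 * bpf(2) syscall. Field values are examples only.
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = 4,	// keys must be exactly a u32
 *		.value_size  = 64,	// non-zero and <= KMALLOC_MAX_SIZE
 *		.max_entries = 256,	// must be non-zero
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */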
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int ret, numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool bypass_spec_v1 = bpf_bypass_spec_v1();
	u64 cost, array_size, mask64;
	struct bpf_map_memory mem;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (!bypass_spec_v1) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu) {
		array_size += (u64) max_entries * sizeof(void *);
	} else {
		/* rely on vmalloc() to return page-aligned memory and
		 * ensure array->value is exactly page-aligned
		 */
		if (attr->map_flags & BPF_F_MMAPABLE) {
			array_size = PAGE_ALIGN(array_size);
			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
		} else {
			array_size += (u64) max_entries * elem_size;
		}
	}

	/* make sure there is no u32 overflow later in round_up() */
	cost = array_size;
	if (percpu)
		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();

	ret = bpf_map_charge_init(&mem, cost);
	if (ret < 0)
		return ERR_PTR(ret);

	/* allocate all map elements and zero-initialize them */
	if (attr->map_flags & BPF_F_MMAPABLE) {
		void *data;

		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
		if (!data) {
			bpf_map_charge_finish(&mem);
			return ERR_PTR(-ENOMEM);
		}
		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
			- offsetof(struct bpf_array, value);
	} else {
		array = bpf_map_area_alloc(array_size, numa_node);
	}
	if (!array) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}
	array->index_mask = index_mask;
	array->map.bypass_spec_v1 = bypass_spec_v1;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	bpf_map_charge_move(&array->map.memory, &mem);
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_charge_finish(&array->map.memory);
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}
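/* For reference, the BPF_F_MMAPABLE placement above puts the element area in
 * its own page-aligned region of the vmalloc'ed buffer, so only that region
 * needs to be exposed to user space, while struct bpf_array is laid out so
 * that array->value lands exactly on the following page boundary:
 *
 *	+------------------------------+  <- page-aligned vmalloc area (data)
 *	| padding + struct bpf_array   |
 *	+------------------------------+  <- array->value, page-aligned
 *	| max_entries * elem_size      |     (the part handed out by mmap)
 *	+------------------------------+
 */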
/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * (index & array->index_mask);
}

static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
				       u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -EOPNOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}

static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
				       u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -EOPNOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}
/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	if (map->map_flags & BPF_F_INNER_MAP)
		return -EOPNOTSUPP;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}
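/* Roughly, the inlined sequence emitted above corresponds to:
 *
 *	r1 += offsetof(struct bpf_array, value)
 *	r0 = *(u32 *)(r2 + 0)		// load the u32 key
 *	if r0 >= max_entries goto miss
 *	w0 &= index_mask		// only without bypass_spec_v1
 *	r0 <<= ilog2(elem_size)		// or r0 *= elem_size
 *	r0 += r1			// &array->value[index * elem_size]
 *	goto +1
 * miss:
 *	r0 = 0				// NULL
 */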
/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
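/* Illustrative user-space counterpart (assumption: libbpf wrappers, 8-byte
 * values): a lookup on a per-cpu array must pass a buffer large enough for
 * every possible CPU, using the same rounded element size as above.
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *	__u64 *vals = calloc(ncpus, 8);		// round_up(value_size, 8) per CPU
 *	__u32 key = 0;
 *	bpf_map_lookup_elem(map_fd, &key, vals);
 *	// vals[cpu] now holds the value last written by that CPU
 */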
/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}
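/* Illustrative user-space iteration (assumption: libbpf wrappers): passing an
 * out-of-range (or no) key restarts the walk at index 0, and -ENOENT on the
 * last element ends it.
 *
 *	__u32 key = -1, next;		// out of range, so the walk starts at 0
 *	while (!bpf_map_get_next_key(map_fd, &key, &next)) {
 *		// process element 'next'
 *		key = next;
 *	}
 */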
/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(map)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	} else {
		val = array->value +
			array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
	}
	return 0;
}
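/* Illustrative use of BPF_F_LOCK (assumption: libbpf wrappers, example type):
 * when the value's BTF contains a struct bpf_spin_lock, user space can ask
 * for a locked copy-in so it stays consistent with BPF-side updates.
 *
 *	struct my_val { struct bpf_spin_lock lock; __u64 counter; } v = {};
 *	__u32 key = 0;
 *	bpf_map_update_elem(map_fd, &key, &v, BPF_F_LOCK);
 */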
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

static void *array_map_vmalloc_addr(struct bpf_array *array)
{
	return (void *)round_down((unsigned long)array, PAGE_SIZE);
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	if (array->map.map_flags & BPF_F_MMAPABLE)
		bpf_map_area_free(array_map_vmalloc_addr(array));
	else
		bpf_map_area_free(array);
}
static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}
static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;

	if (!(map->map_flags & BPF_F_MMAPABLE))
		return -EINVAL;

	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
		return -EINVAL;

	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
				   vma->vm_pgoff + pgoff);
}
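/* Illustrative user-space mapping of a BPF_F_MMAPABLE array (names and sizes
 * are examples, assuming 8-byte values): the map fd itself is mmap'ed, and
 * the element area then shows up directly in the process' address space.
 *
 *	size_t len = 4096;	// enough for max_entries * round_up(value_size, 8)
 *	__u64 *vals = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, map_fd, 0);
 *	vals[3] = 42;		// writes element 3 without a syscall
 */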
static bool array_map_meta_equal(const struct bpf_map *meta0,
				 const struct bpf_map *meta1)
{
	if (!bpf_map_meta_equal(meta0, meta1))
		return false;
	return meta0->map_flags & BPF_F_INNER_MAP ? true :
	       meta0->max_entries == meta1->max_entries;
}

struct bpf_iter_seq_array_map_info {
	struct bpf_map *map;
	void *percpu_value_buf;
	u32 index;
};
static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	if (info->index >= map->max_entries)
		return NULL;

	if (*pos == 0)
		++*pos;
	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array->value + array->elem_size * index;
}

static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	++*pos;
	++info->index;
	if (info->index >= map->max_entries)
		return NULL;

	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array->value + array->elem_size * index;
}
static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_iter__bpf_map_elem ctx = {};
	struct bpf_map *map = info->map;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int off = 0, cpu = 0;
	void __percpu **pptr;
	u32 size;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, v == NULL);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (v) {
		ctx.key = &info->index;

		if (!info->percpu_value_buf) {
			ctx.value = v;
		} else {
			pptr = v;
			size = round_up(map->value_size, 8);
			for_each_possible_cpu(cpu) {
				bpf_long_memcpy(info->percpu_value_buf + off,
						per_cpu_ptr(pptr, cpu),
						size);
				off += size;
			}
			ctx.value = info->percpu_value_buf;
		}
	}

	return bpf_iter_run_prog(prog, &ctx);
}
static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_array_map_seq_show(seq, v);
}

static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__bpf_array_map_seq_show(seq, NULL);
}

static int bpf_iter_init_array_map(void *priv_data,
				   struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
	struct bpf_map *map = aux->map;
	void *value_buf;
	u32 buf_size;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		buf_size = round_up(map->value_size, 8) * num_possible_cpus();
		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
		if (!value_buf)
			return -ENOMEM;

		seq_info->percpu_value_buf = value_buf;
	}

	seq_info->map = map;
	return 0;
}

static void bpf_iter_fini_array_map(void *priv_data)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;

	kfree(seq_info->percpu_value_buf);
}

static const struct seq_operations bpf_array_map_seq_ops = {
	.start = bpf_array_map_seq_start,
	.next = bpf_array_map_seq_next,
	.stop = bpf_array_map_seq_stop,
	.show = bpf_array_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops = &bpf_array_map_seq_ops,
	.init_seq_private = bpf_iter_init_array_map,
	.fini_seq_private = bpf_iter_fini_array_map,
	.seq_priv_size = sizeof(struct bpf_iter_seq_array_map_info),
};
static int array_map_btf_id;
const struct bpf_map_ops array_map_ops = {
	.map_meta_equal = array_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_mmap = array_map_mmap,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_btf_name = "bpf_array",
	.map_btf_id = &array_map_btf_id,
	.iter_seq_info = &iter_seq_info,
};

static int percpu_array_map_btf_id;
const struct bpf_map_ops percpu_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_btf_name = "bpf_array",
	.map_btf_id = &percpu_array_map_btf_id,
	.iter_seq_info = &iter_seq_info,
};
static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}
/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}
/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, new_ptr);
		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, new_ptr);
	}

	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);
	return 0;
}
static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, NULL);
		map->ops->map_poke_run(map, index, old_ptr, NULL);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, NULL);
	}

	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}
static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}
static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	void **elem, *ptr;
	u32 prog_id;

	rcu_read_lock();

	elem = array_map_lookup_elem(map, key);
	if (elem) {
		ptr = READ_ONCE(*elem);
		if (ptr) {
			seq_printf(m, "%u: ", *(u32 *)key);
			prog_id = prog_fd_array_sys_lookup_elem(ptr);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  &prog_id, m);
			seq_puts(m, "\n");
		}
	}

	rcu_read_unlock();
}

struct prog_poke_elem {
	struct list_head list;
	struct bpf_prog_aux *aux;
};
static int prog_array_map_poke_track(struct bpf_map *map,
				     struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;
	int ret = 0;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry(elem, &aux->poke_progs, list) {
		if (elem->aux == prog_aux)
			goto out;
	}

	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
	if (!elem) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&elem->list);
	/* We must track the program's aux info at this point in time
	 * since the program pointer itself may not be stable yet, see
	 * also comment in prog_array_map_poke_run().
	 */
	elem->aux = prog_aux;

	list_add_tail(&elem->list, &aux->poke_progs);
out:
	mutex_unlock(&aux->poke_mutex);
	return ret;
}

static void prog_array_map_poke_untrack(struct bpf_map *map,
					struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		if (elem->aux == prog_aux) {
			list_del_init(&elem->list);
			kfree(elem);
			break;
		}
	}
	mutex_unlock(&aux->poke_mutex);
}
static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
				    struct bpf_prog *old,
				    struct bpf_prog *new)
{
	u8 *old_addr, *new_addr, *old_bypass_addr;
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));

	list_for_each_entry(elem, &aux->poke_progs, list) {
		struct bpf_jit_poke_descriptor *poke;
		int i, ret;

		for (i = 0; i < elem->aux->size_poke_tab; i++) {
			poke = &elem->aux->poke_tab[i];

			/* Few things to be aware of:
			 *
			 * 1) We can only ever access aux in this context, but
			 *    not aux->prog since it might not be stable yet and
			 *    there could be danger of use after free otherwise.
			 * 2) Initially when we start tracking aux, the program
			 *    is not JITed yet and also does not have a kallsyms
			 *    entry. We skip these as poke->tailcall_target_stable
			 *    is not active yet. The JIT will do the final fixup
			 *    before setting it stable. The various
			 *    poke->tailcall_target_stable are successively
			 *    activated, so tail call updates can arrive from here
			 *    while JIT is still finishing its final fixup for
			 *    non-activated poke entries.
			 * 3) On program teardown, the program's kallsym entry gets
			 *    removed out of RCU callback, but we can only untrack
			 *    from sleepable context, therefore bpf_arch_text_poke()
			 *    might not see that this is in BPF text section and
			 *    bails out with -EINVAL. As these are unreachable since
			 *    RCU grace period already passed, we simply skip them.
			 * 4) Also, programs reaching a refcount of zero while patching
			 *    is in progress are okay since we're protected under
			 *    poke_mutex and untrack the programs before the JIT
			 *    buffer is freed. When we're still in the middle of
			 *    patching and suddenly kallsyms entry of the program
			 *    gets evicted, we just skip the rest which is fine due
			 *    to point 3).
			 * 5) Any other error happening below from bpf_arch_text_poke()
			 *    is an unexpected bug.
			 */
			if (!READ_ONCE(poke->tailcall_target_stable))
				continue;
			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
				continue;
			if (poke->tail_call.map != map ||
			    poke->tail_call.key != key)
				continue;

			old_bypass_addr = old ? NULL : poke->bypass_addr;
			old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
			new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;

			if (new) {
				ret = bpf_arch_text_poke(poke->tailcall_target,
							 BPF_MOD_JUMP,
							 old_addr, new_addr);
				BUG_ON(ret < 0 && ret != -EINVAL);
				if (!old) {
					ret = bpf_arch_text_poke(poke->tailcall_bypass,
								 BPF_MOD_JUMP,
								 poke->bypass_addr,
								 NULL);
					BUG_ON(ret < 0 && ret != -EINVAL);
				}
			} else {
				ret = bpf_arch_text_poke(poke->tailcall_bypass,
							 BPF_MOD_JUMP,
							 old_bypass_addr,
							 poke->bypass_addr);
				BUG_ON(ret < 0 && ret != -EINVAL);
				/* let other CPUs finish the execution of program
				 * so that it will not be possible to expose them
				 * to an invalid nop, stack unwind, nop state
				 */
				if (!ret)
					synchronize_rcu();
				ret = bpf_arch_text_poke(poke->tailcall_target,
							 BPF_MOD_JUMP,
							 old_addr, NULL);
				BUG_ON(ret < 0 && ret != -EINVAL);
			}
		}
	}
}
static void prog_array_map_clear_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_array_aux,
					   work)->map;
	bpf_fd_array_map_clear(map);
	bpf_map_put(map);
}

static void prog_array_map_clear(struct bpf_map *map)
{
	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
						 map)->aux;
	bpf_map_inc(map);
	schedule_work(&aux->work);
}

static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
{
	struct bpf_array_aux *aux;
	struct bpf_map *map;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL);
	if (!aux)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
	INIT_LIST_HEAD(&aux->poke_progs);
	mutex_init(&aux->poke_mutex);

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		kfree(aux);
		return map;
	}

	container_of(map, struct bpf_array, map)->aux = aux;
	aux->map = map;

	return map;
}

static void prog_array_map_free(struct bpf_map *map)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		list_del_init(&elem->list);
		kfree(elem);
	}
	kfree(aux);
	fd_array_map_free(map);
}
/* prog_array->aux->{type,jited} is a runtime binding.
 * Doing static check alone in the verifier is not enough.
 * Thus, prog_array_map cannot be used as an inner_map
 * and map_meta_equal is not implemented.
 */
static int prog_array_map_btf_id;
const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = prog_array_map_alloc,
	.map_free = prog_array_map_free,
	.map_poke_track = prog_array_map_poke_track,
	.map_poke_untrack = prog_array_map_poke_untrack,
	.map_poke_run = prog_array_map_poke_run,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
	.map_release_uref = prog_array_map_clear,
	.map_seq_show_elem = prog_array_map_seq_show_elem,
	.map_btf_name = "bpf_array",
	.map_btf_id = &prog_array_map_btf_id,
};
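/* Illustrative sketch of how a prog array is typically consumed (names are
 * examples, not part of this file). User space installs program fds at given
 * indices, and the BPF side jumps to them via the tail call helper:
 *
 *	// user space, e.g. with libbpf wrappers (BPF_ANY is required here):
 *	__u32 idx = 0;
 *	bpf_map_update_elem(prog_array_fd, &idx, &prog_fd, BPF_ANY);
 *
 *	// BPF program side:
 *	bpf_tail_call(ctx, &jmp_table, idx);
 *	// execution falls through here only if the slot is empty or the call fails
 */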
static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}
static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
		return;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

static void perf_event_fd_array_map_free(struct bpf_map *map)
{
	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
		bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}
static int perf_event_array_map_btf_id;
const struct bpf_map_ops perf_event_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = perf_event_fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_array",
	.map_btf_id = &perf_event_array_map_btf_id,
};
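/* Illustrative use of a perf event array (names are examples, not part of
 * this file): user space stores one perf event fd per slot, and the BPF
 * program streams records to it through the output helper:
 *
 *	// BPF program side:
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &sample, sizeof(sample));
 *
 * With BPF_F_PRESERVE_ELEMS set at map creation, the stored events survive
 * the last user-space file reference being dropped and are only released in
 * perf_event_fd_array_map_free(), as handled above.
 */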
#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees cgrp after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static int cgroup_array_map_btf_id;
const struct bpf_map_ops cgroup_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_array",
	.map_btf_id = &cgroup_array_map_btf_id,
};
#endif
static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}
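/* Illustrative map-in-map lookup from a BPF program (names are examples, not
 * part of this file): the outer array yields an inner map pointer, which is
 * then used for a second lookup.
 *
 *	void *inner = bpf_map_lookup_elem(&outer_array, &outer_idx);
 *	if (inner) {
 *		__u64 *val = bpf_map_lookup_elem(inner, &inner_key);
 *		...
 *	}
 */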
static int array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}

static int array_of_maps_map_btf_id;
const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_array",
	.map_btf_id = &array_of_maps_map_btf_id,
};