/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}

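/* For illustration only (not part of this file): a minimal user-space
 * sketch of map_create attributes that satisfy the checks above --
 * key_size must be exactly 4 bytes and max_entries must be non-zero.
 * The field names are the real union bpf_attr UAPI names:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = 4,
 *		.value_size  = sizeof(long),
 *		.max_entries = 256,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */
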
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int ret, numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool unpriv = !capable(CAP_SYS_ADMIN);
	u64 cost, array_size, mask64;
	struct bpf_map_memory mem;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (unpriv) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;

		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu)
		array_size += (u64) max_entries * sizeof(void *);
	else
		array_size += (u64) max_entries * elem_size;

	/* make sure there is no u32 overflow later in round_up() */
	cost = array_size;
	if (cost >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-ENOMEM);
	if (percpu) {
		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
		if (cost >= U32_MAX - PAGE_SIZE)
			return ERR_PTR(-ENOMEM);
	}
	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	ret = bpf_map_charge_init(&mem, cost);
	if (ret < 0)
		return ERR_PTR(ret);

	/* allocate all map elements and zero-initialize them */
	array = bpf_map_area_alloc(array_size, numa_node);
	if (!array) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}
	array->index_mask = index_mask;
	array->map.unpriv_array = unpriv;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	bpf_map_charge_move(&array->map.memory, &mem);
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_charge_finish(&array->map.memory);
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}

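/* Worked example of the masking arithmetic above (illustration only):
 * with attr->max_entries == 5 and an unprivileged caller,
 * fls_long(5 - 1) == 3, so mask64 == (1ULL << 3) - 1 == 7. Hence
 * index_mask == 7 and max_entries is rounded up to 8, so even a
 * speculatively out-of-bounds index stays confined to the
 * power-of-2-sized region that is actually allocated.
 */
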
/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * (index & array->index_mask);
}

static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
				       u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -EOPNOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}

static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
				       u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -EOPNOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}

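/* For illustration only: with unpriv_array == false and a power-of-2
 * elem_size, the sequence emitted above reads roughly as (pseudo-asm):
 *
 *	r1 += offsetof(struct bpf_array, value)
 *	r0 = *(u32 *)(r2 + 0)		// load index from key
 *	if r0 >= max_entries goto +3	// out of bounds -> return NULL
 *	r0 <<= ilog2(elem_size)		// index -> byte offset
 *	r0 += r1			// &array->value[index]
 *	goto +1
 *	r0 = 0				// NULL
 *
 * In the unprivileged case an extra "r0 &= index_mask" follows the
 * bounds check to bound speculative execution.
 */
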
/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

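/* For illustration only: user space reading a per-cpu array element
 * must size its buffer for every possible CPU, with each per-CPU slot
 * padded to round_up(value_size, 8) bytes. A sketch using libbpf:
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *	__u64 vals[ncpus];	// value_size == 8, already 8-byte aligned
 *	bpf_map_lookup_elem(map_fd, &key, vals);  // fills vals[0..ncpus-1]
 */
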
/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(map)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	} else {
		val = array->value +
			array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
	}
	return 0;
}

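/* For illustration only: BPF_F_LOCK above is meaningful when the map
 * value embeds a struct bpf_spin_lock, e.g. (sketch of a value type):
 *
 *	struct val {
 *		struct bpf_spin_lock lock;
 *		long cnt;
 *	};
 *
 * An update with BPF_F_LOCK then copies everything except the lock
 * field while holding that lock (see copy_map_value_locked()).
 */
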
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding programs to complete
	 * and free the array
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	bpf_map_area_free(array);
}

static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

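/* For illustration only: on the BPF program side this means the map's
 * declared key type must be a plain 32-bit integer, e.g. (sketch of
 * the common libbpf BTF-style declaration):
 *
 *	__type(key, __u32);	// any other key type fails the check above
 */
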
const struct bpf_map_ops array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
};

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	void **elem, *ptr;
	u32 prog_id;

	rcu_read_lock();

	elem = array_map_lookup_elem(map, key);
	if (elem) {
		ptr = READ_ONCE(*elem);
		if (ptr) {
			seq_printf(m, "%u: ", *(u32 *)key);
			prog_id = prog_fd_array_sys_lookup_elem(ptr);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  &prog_id, m);
			seq_puts(m, "\n");
		}
	}

	rcu_read_unlock();
}

const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
	.map_release_uref = bpf_fd_array_map_clear,
	.map_seq_show_elem = prog_array_map_seq_show_elem,
};

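/* For illustration only: a prog array is the map type behind
 * bpf_tail_call(). A BPF-side sketch using libbpf conventions
 * (not part of this file):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
 *		__uint(max_entries, 8);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));
 *	} jmp_table SEC(".maps");
 *
 *	bpf_tail_call(ctx, &jmp_table, slot);  // jump to prog at 'slot'
 */
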
static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

const struct bpf_map_ops perf_event_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
	.map_check_btf = map_check_no_btf,
};

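/* For illustration only: a perf event array is typically paired with
 * bpf_perf_event_output() on the BPF side (sketch, libbpf conventions):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(key_size, sizeof(int));
 *		__uint(value_size, sizeof(int));
 *	} events SEC(".maps");
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &data, sizeof(data));
 */
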
#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees cgrp after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
	.map_check_btf = map_check_no_btf,
};
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static u32 array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}

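/* For illustration only: compared with array_map_gen_lookup(), the
 * sequence above adds one dereference, because each element stores a
 * struct bpf_map * rather than the value itself (pseudo-asm):
 *
 *	r0 = *(u64 *)(r0 + 0)	// slot -> inner map pointer
 *	if r0 == 0 goto +1	// empty slot -> return NULL
 *
 * and the bounds-check jump offsets grow accordingly (6/5 vs 4/3).
 */
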
const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
	.map_check_btf = map_check_no_btf,
};