// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/random.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/btf_ids.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
#include "map_in_map.h"
#include <linux/bpf_mem_alloc.h>

#define HTAB_CREATE_FLAG_MASK						\
	(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE |	\
	 BPF_F_ACCESS_MASK | BPF_F_ZERO_SEED)

#define BATCH_OPS(_name)			\
	.map_lookup_batch =			\
	_name##_map_lookup_batch,		\
	.map_lookup_and_delete_batch =		\
	_name##_map_lookup_and_delete_batch,	\
	.map_update_batch =			\
	generic_map_update_batch,		\
	.map_delete_batch =			\
	generic_map_delete_batch

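/* For reference, BATCH_OPS(htab) expands to the four batched ops:
 * .map_lookup_batch = htab_map_lookup_batch and
 * .map_lookup_and_delete_batch = htab_map_lookup_and_delete_batch from this
 * file, plus the generic update/delete batch helpers shared by all maps.
 */
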
/*
 * The bucket lock has two protection scopes:
 *
 * 1) Serializing concurrent operations from BPF programs on different
 *    CPUs
 *
 * 2) Serializing concurrent operations from BPF programs and sys_bpf()
 *
 * BPF programs can execute in any context including perf, kprobes and
 * tracing. As there are almost no limits on where perf, kprobes and
 * tracing can be invoked from, the lock operations need to be protected
 * against deadlocks. Deadlocks can be caused by recursion and by an
 * invocation in the lock held section when functions which acquire this
 * lock are invoked from sys_bpf(). BPF recursion is prevented by
 * incrementing the per CPU variable bpf_prog_active, which prevents BPF
 * programs attached to perf events, kprobes and tracing from being
 * invoked before the prior invocation from one of these contexts has
 * completed. sys_bpf() uses the same mechanism by pinning the task to the
 * current CPU and incrementing the recursion protection across the map
 * operation.
 *
 * This has subtle implications on PREEMPT_RT. PREEMPT_RT forbids certain
 * operations like memory allocations (even with GFP_ATOMIC) from atomic
 * contexts. This is required because even with GFP_ATOMIC the memory
 * allocator calls into code paths which acquire locks with long held lock
 * sections. To ensure the deterministic behaviour these locks are regular
 * spinlocks, which are converted to 'sleepable' spinlocks on RT. The only
 * true atomic contexts on an RT kernel are the low level hardware
 * handling, scheduling, low level interrupt handling, NMIs etc. None of
 * these contexts should ever do memory allocations.
 *
 * As regular device interrupt handlers and soft interrupts are forced into
 * thread context, the existing code which does
 *   spin_lock*(); alloc(GFP_ATOMIC); spin_unlock*();
 * just works.
 *
 * In theory the BPF locks could be converted to regular spinlocks as well,
 * but the bucket locks and percpu_freelist locks can be taken from
 * arbitrary contexts (perf, kprobes, tracepoints) which are required to be
 * atomic contexts even on RT. Before the introduction of bpf_mem_alloc,
 * it was only safe to use a raw spinlock for a preallocated hash map on an
 * RT kernel, because there was no memory allocation within the lock held
 * sections. Now that the hash map has been fully converted to use
 * bpf_mem_alloc, memory allocation for the non-preallocated hash map is no
 * longer synchronous, so it is safe to always use a raw spinlock for the
 * bucket lock.
 */
struct bucket {
	struct hlist_nulls_head head;
	raw_spinlock_t raw_lock;
};

#define HASHTAB_MAP_LOCK_COUNT 8
#define HASHTAB_MAP_LOCK_MASK (HASHTAB_MAP_LOCK_COUNT - 1)

struct bpf_htab {
	struct bpf_map map;
	struct bpf_mem_alloc ma;
	struct bpf_mem_alloc pcpu_ma;
	struct bucket *buckets;
	void *elems;
	union {
		struct pcpu_freelist freelist;
		struct bpf_lru lru;
	};
	struct htab_elem *__percpu *extra_elems;
	/* The number of elements in a non-preallocated hashtable is kept
	 * in either pcount or count.
	 */
	struct percpu_counter pcount;
	atomic_t count;
	bool use_percpu_counter;
	u32 n_buckets;	/* number of hash buckets */
	u32 elem_size;	/* size of each element in bytes */
	u32 hashrnd;
	struct lock_class_key lockdep_key;
	int __percpu *map_locked[HASHTAB_MAP_LOCK_COUNT];
};

/* each htab element is struct htab_elem + key + value */
struct htab_elem {
	union {
		struct hlist_nulls_node hash_node;
		struct {
			void *padding;
			union {
				struct pcpu_freelist_node fnode;
				struct htab_elem *batch_flink;
			};
		};
	};
	union {
		/* pointer to per-cpu pointer */
		void *ptr_to_pptr;

		struct bpf_lru_node lru_node;
	};
	u32 hash;
	char key[] __aligned(8);
};

static inline bool htab_is_prealloc(const struct bpf_htab *htab)
{
	return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
}

static void htab_init_buckets(struct bpf_htab *htab)
{
	unsigned int i;

	for (i = 0; i < htab->n_buckets; i++) {
		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
		raw_spin_lock_init(&htab->buckets[i].raw_lock);
		lockdep_set_class(&htab->buckets[i].raw_lock,
				  &htab->lockdep_key);
		cond_resched();
	}
}

static inline int htab_lock_bucket(const struct bpf_htab *htab,
				   struct bucket *b, u32 hash,
				   unsigned long *pflags)
{
	unsigned long flags;

	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);

	preempt_disable();
	local_irq_save(flags);
	if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
		__this_cpu_dec(*(htab->map_locked[hash]));
		local_irq_restore(flags);
		preempt_enable();
		return -EBUSY;
	}

	raw_spin_lock(&b->raw_lock);
	*pflags = flags;

	return 0;
}

static inline void htab_unlock_bucket(const struct bpf_htab *htab,
				      struct bucket *b, u32 hash,
				      unsigned long flags)
{
	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
	raw_spin_unlock(&b->raw_lock);
	__this_cpu_dec(*(htab->map_locked[hash]));
	local_irq_restore(flags);
	preempt_enable();
}

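/* Typical caller pattern (sketch, mirroring the update/delete paths below):
 *
 *	ret = htab_lock_bucket(htab, b, hash, &flags);
 *	if (ret)
 *		return ret;	// -EBUSY: map op re-entered on this CPU
 *	... modify the bucket's hlist under b->raw_lock ...
 *	htab_unlock_bucket(htab, b, hash, flags);
 */
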
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);

static bool htab_is_lru(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static bool htab_is_percpu(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
				     void __percpu *pptr)
{
	*(void __percpu **)(l->key + key_size) = pptr;
}

static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
{
	return *(void __percpu **)(l->key + key_size);
}

static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
{
	return *(void **)(l->key + roundup(map->key_size, 8));
}

static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
{
	return (struct htab_elem *) (htab->elems + i * (u64)htab->elem_size);
}

static bool htab_has_extra_elems(struct bpf_htab *htab)
{
	return !htab_is_percpu(htab) && !htab_is_lru(htab);
}

static void htab_free_prealloced_timers(struct bpf_htab *htab)
{
	u32 num_entries = htab->map.max_entries;
	int i;

	if (!btf_record_has_field(htab->map.record, BPF_TIMER))
		return;
	if (htab_has_extra_elems(htab))
		num_entries += num_possible_cpus();

	for (i = 0; i < num_entries; i++) {
		struct htab_elem *elem;

		elem = get_htab_elem(htab, i);
		bpf_obj_free_timer(htab->map.record, elem->key + round_up(htab->map.key_size, 8));
		cond_resched();
	}
}

static void htab_free_prealloced_fields(struct bpf_htab *htab)
{
	u32 num_entries = htab->map.max_entries;
	int i;

	if (IS_ERR_OR_NULL(htab->map.record))
		return;
	if (htab_has_extra_elems(htab))
		num_entries += num_possible_cpus();
	for (i = 0; i < num_entries; i++) {
		struct htab_elem *elem;

		elem = get_htab_elem(htab, i);
		if (htab_is_percpu(htab)) {
			void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size);
			int cpu;

			for_each_possible_cpu(cpu) {
				bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
				cond_resched();
			}
		} else {
			bpf_obj_free_fields(htab->map.record, elem->key + round_up(htab->map.key_size, 8));
			cond_resched();
		}
		cond_resched();
	}
}

static void htab_free_elems(struct bpf_htab *htab)
{
	int i;

	if (!htab_is_percpu(htab))
		goto free_elems;

	for (i = 0; i < htab->map.max_entries; i++) {
		void __percpu *pptr;

		pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
					 htab->map.key_size);
		free_percpu(pptr);
		cond_resched();
	}
free_elems:
	bpf_map_area_free(htab->elems);
}

/* The LRU list has a lock (lru_lock). Each htab bucket has a lock
 * (bucket_lock). If both locks need to be acquired together, the lock
 * order is always lru_lock -> bucket_lock and this only happens in
 * bpf_lru_list.c logic. For example, certain code path of
 * bpf_lru_pop_free(), which is called by function prealloc_lru_pop(),
 * will acquire lru_lock first followed by acquiring bucket_lock.
 *
 * In hashtab.c, to avoid deadlock, lock acquisition of
 * bucket_lock followed by lru_lock is not allowed. In such cases,
 * bucket_lock needs to be released first before acquiring lru_lock.
 */

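/* Ordering sketch of the rule above:
 *
 *	allowed:   lru_lock -> bucket_lock	(bpf_lru_list.c, via
 *						 htab_lru_map_delete_node())
 *	forbidden: bucket_lock -> lru_lock	(would deadlock against the
 *						 path above)
 *
 * That is why htab_lru_map_delete_elem() and the batch ops drop the bucket
 * lock before calling htab_lru_push_free().
 */
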
static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
					  u32 hash)
{
	struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
	struct htab_elem *l;

	if (node) {
		bpf_map_inc_elem_count(&htab->map);
		l = container_of(node, struct htab_elem, lru_node);
		memcpy(l->key, key, htab->map.key_size);
		return l;
	}

	return NULL;
}

static int prealloc_init(struct bpf_htab *htab)
{
	u32 num_entries = htab->map.max_entries;
	int err = -ENOMEM, i;

	if (htab_has_extra_elems(htab))
		num_entries += num_possible_cpus();

	htab->elems = bpf_map_area_alloc((u64)htab->elem_size * num_entries,
					 htab->map.numa_node);
	if (!htab->elems)
		return -ENOMEM;

	if (!htab_is_percpu(htab))
		goto skip_percpu_elems;

	for (i = 0; i < num_entries; i++) {
		u32 size = round_up(htab->map.value_size, 8);
		void __percpu *pptr;

		pptr = bpf_map_alloc_percpu(&htab->map, size, 8,
					    GFP_USER | __GFP_NOWARN);
		if (!pptr)
			goto free_elems;
		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
				  pptr);
		cond_resched();
	}

skip_percpu_elems:
	if (htab_is_lru(htab))
		err = bpf_lru_init(&htab->lru,
				   htab->map.map_flags & BPF_F_NO_COMMON_LRU,
				   offsetof(struct htab_elem, hash) -
				   offsetof(struct htab_elem, lru_node),
				   htab_lru_map_delete_node,
				   htab);
	else
		err = pcpu_freelist_init(&htab->freelist);

	if (err)
		goto free_elems;

	if (htab_is_lru(htab))
		bpf_lru_populate(&htab->lru, htab->elems,
				 offsetof(struct htab_elem, lru_node),
				 htab->elem_size, num_entries);
	else
		pcpu_freelist_populate(&htab->freelist,
				       htab->elems + offsetof(struct htab_elem, fnode),
				       htab->elem_size, num_entries);

	return 0;

free_elems:
	htab_free_elems(htab);
	return err;
}

static void prealloc_destroy(struct bpf_htab *htab)
{
	htab_free_elems(htab);

	if (htab_is_lru(htab))
		bpf_lru_destroy(&htab->lru);
	else
		pcpu_freelist_destroy(&htab->freelist);
}

static int alloc_extra_elems(struct bpf_htab *htab)
{
	struct htab_elem *__percpu *pptr, *l_new;
	struct pcpu_freelist_node *l;
	int cpu;

	pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8,
				    GFP_USER | __GFP_NOWARN);
	if (!pptr)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		l = pcpu_freelist_pop(&htab->freelist);
		/* pop will succeed, since prealloc_init()
		 * preallocated extra num_possible_cpus elements
		 */
		l_new = container_of(l, struct htab_elem, fnode);
		*per_cpu_ptr(pptr, cpu) = l_new;
	}
	htab->extra_elems = pptr;
	return 0;
}

/* Called from syscall */
static int htab_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	/* percpu_lru means each cpu has its own LRU list.
	 * It is different from BPF_MAP_TYPE_PERCPU_HASH where
	 * the map's value itself is percpu. percpu_lru has
	 * nothing to do with the map's value.
	 */
	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
	bool zero_seed = (attr->map_flags & BPF_F_ZERO_SEED);
	int numa_node = bpf_map_attr_numa_node(attr);

	BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
		     offsetof(struct htab_elem, hash_node.pprev));

	if (zero_seed && !capable(CAP_SYS_ADMIN))
		/* Guard against local DoS, and discourage production use. */
		return -EPERM;

	if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags))
		return -EINVAL;

	if (!lru && percpu_lru)
		return -EINVAL;

	if (lru && !prealloc)
		return -ENOTSUPP;

	if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
		return -EINVAL;

	/* check sanity of attributes.
	 * value_size == 0 may be allowed in the future to use map as a set
	 */
	if (attr->max_entries == 0 || attr->key_size == 0 ||
	    attr->value_size == 0)
		return -EINVAL;

	if ((u64)attr->key_size + attr->value_size >= KMALLOC_MAX_SIZE -
	    sizeof(struct htab_elem))
		/* if key_size + value_size is bigger, the user space won't be
		 * able to access the elements via bpf syscall. This check
		 * also makes sure that the elem_size doesn't overflow and it's
		 * kmalloc-able later in htab_map_update_elem()
		 */
		return -E2BIG;

	return 0;
}

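/* Userspace sketch of one of the checks above (libbpf API, hypothetical
 * sizes): LRU maps must be preallocated, so requesting BPF_F_NO_PREALLOC
 * fails with -ENOTSUPP:
 *
 *	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
 *	int fd = bpf_map_create(BPF_MAP_TYPE_LRU_HASH, NULL, 4, 8, 1024, &opts);
 */
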
static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	/* percpu_lru means each cpu has its own LRU list.
	 * It is different from BPF_MAP_TYPE_PERCPU_HASH where
	 * the map's value itself is percpu. percpu_lru has
	 * nothing to do with the map's value.
	 */
	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
	struct bpf_htab *htab;
	int err, i;

	htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	lockdep_register_key(&htab->lockdep_key);

	bpf_map_init_from_attr(&htab->map, attr);

	if (percpu_lru) {
		/* ensure each CPU's lru list has >= 1 element.
		 * since we are at it, make each lru list have the same
		 * number of elements.
		 */
		htab->map.max_entries = roundup(attr->max_entries,
						num_possible_cpus());
		if (htab->map.max_entries < attr->max_entries)
			htab->map.max_entries = rounddown(attr->max_entries,
							  num_possible_cpus());
	}

	/* hash table size must be power of 2 */
	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);

	htab->elem_size = sizeof(struct htab_elem) +
			  round_up(htab->map.key_size, 8);
	if (percpu)
		htab->elem_size += sizeof(void *);
	else
		htab->elem_size += round_up(htab->map.value_size, 8);

	err = -E2BIG;
	/* prevent zero size kmalloc and check for u32 overflow */
	if (htab->n_buckets == 0 ||
	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
		goto free_htab;

	err = bpf_map_init_elem_count(&htab->map);
	if (err)
		goto free_htab;

	err = -ENOMEM;
	htab->buckets = bpf_map_area_alloc(htab->n_buckets *
					   sizeof(struct bucket),
					   htab->map.numa_node);
	if (!htab->buckets)
		goto free_elem_count;

	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) {
		htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map,
							   sizeof(int),
							   sizeof(int),
							   GFP_USER);
		if (!htab->map_locked[i])
			goto free_map_locked;
	}

	if (htab->map.map_flags & BPF_F_ZERO_SEED)
		htab->hashrnd = 0;
	else
		htab->hashrnd = get_random_u32();

	htab_init_buckets(htab);

	/* compute_batch_value() computes batch value as num_online_cpus() * 2
	 * and __percpu_counter_compare() needs
	 * htab->max_entries - cur_number_of_elems to be more than batch * num_online_cpus()
	 * for percpu_counter to be faster than atomic_t. In practice the average bpf
	 * hash map size is 10k, which means that a system with 64 cpus will fill
	 * hashmap to 20% of 10k before percpu_counter becomes ineffective. Therefore
	 * define our own batch count as 32 then 10k hash map can be filled up to 80%:
	 * 10k - 8k > 32 _batch_ * 64 _cpus_
	 * and __percpu_counter_compare() will still be fast. At that point hash map
	 * collisions will dominate its performance anyway. Assume that hash map filled
	 * to 50+% isn't going to be O(1) and use the following formula to choose
	 * between percpu_counter and atomic_t.
	 */
#define PERCPU_COUNTER_BATCH 32
	if (attr->max_entries / 2 > num_online_cpus() * PERCPU_COUNTER_BATCH)
		htab->use_percpu_counter = true;

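	/* Worked example for the formula above: with max_entries = 10000 and
	 * 64 online CPUs, 10000 / 2 = 5000 > 64 * 32 = 2048, so the
	 * percpu_counter is used; with max_entries = 1024 on the same box,
	 * 512 <= 2048 and the plain atomic_t counter is kept.
	 */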
	if (htab->use_percpu_counter) {
		err = percpu_counter_init(&htab->pcount, 0, GFP_KERNEL);
		if (err)
			goto free_map_locked;
	}

	if (prealloc) {
		err = prealloc_init(htab);
		if (err)
			goto free_map_locked;

		if (!percpu && !lru) {
			/* lru itself can remove the least used element, so
			 * there is no need for an extra elem during map_update.
			 */
			err = alloc_extra_elems(htab);
			if (err)
				goto free_prealloc;
		}
	} else {
		err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false);
		if (err)
			goto free_map_locked;
		if (percpu) {
			err = bpf_mem_alloc_init(&htab->pcpu_ma,
						 round_up(htab->map.value_size, 8), true);
			if (err)
				goto free_map_locked;
		}
	}

	return &htab->map;

free_prealloc:
	prealloc_destroy(htab);
free_map_locked:
	if (htab->use_percpu_counter)
		percpu_counter_destroy(&htab->pcount);
	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
		free_percpu(htab->map_locked[i]);
	bpf_map_area_free(htab->buckets);
	bpf_mem_alloc_destroy(&htab->pcpu_ma);
	bpf_mem_alloc_destroy(&htab->ma);
free_elem_count:
	bpf_map_free_elem_count(&htab->map);
free_htab:
	lockdep_unregister_key(&htab->lockdep_key);
	bpf_map_area_free(htab);
	return ERR_PTR(err);
}

static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd)
{
	if (likely(key_len % 4 == 0))
		return jhash2(key, key_len / 4, hashrnd);
	return jhash(key, key_len, hashrnd);
}

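/* e.g. a 16-byte key is hashed as four u32 words via jhash2(), while a
 * 6-byte key falls back to the byte-wise jhash(); both are seeded with
 * hashrnd, which is 0 when BPF_F_ZERO_SEED was requested.
 */
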
static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &htab->buckets[hash & (htab->n_buckets - 1)];
}

static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &__select_bucket(htab, hash)->head;
}

/* this lookup function can only be called with bucket lock taken */
static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
					 void *key, u32 key_size)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	return NULL;
}

/* can be called without bucket lock. it will repeat the loop in
 * the unlikely event when elements moved from one bucket into another
 * while the linked list is being walked
 */
static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
					       u32 hash, void *key,
					       u32 key_size, u32 n_buckets)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

again:
	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
		goto again;

	return NULL;
}

/* Called from syscall or from eBPF program directly, so
 * arguments have to match bpf_map_lookup_elem() exactly.
 * The return value is adjusted by BPF instructions
 * in htab_map_gen_lookup().
 */
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	u32 hash, key_size;

	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	head = select_bucket(htab, hash);

	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	return l;
}

static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return l->key + round_up(map->key_size, 8);

	return NULL;
}

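/* Example use from a BPF program (editor's sketch; the map definition uses
 * libbpf's SEC(".maps") convention and all names are made up):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 1024);
 *		__type(key, u32);
 *		__type(value, u64);
 *	} my_map SEC(".maps");
 *
 *	u32 key = 1;
 *	u64 *val = bpf_map_lookup_elem(&my_map, &key);
 *	if (val)
 *		__sync_fetch_and_add(val, 1);
 *
 * The verifier inlines the helper call via htab_map_gen_lookup() below.
 */
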
/* inline bpf_map_lookup_elem() call.
 * Instead of:
 * bpf_prog
 *   bpf_map_lookup_elem
 *     map->ops->map_lookup_elem
 *       htab_map_lookup_elem
 *         __htab_map_lookup_elem
 * do:
 * bpf_prog
 *   __htab_map_lookup_elem
 */
static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
		     (void *(*)(struct bpf_map *map, void *key))NULL));
	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	return insn - insn_buf;
}

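/* The emitted sequence is roughly (sketch, in BPF pseudo-asm):
 *
 *	call __htab_map_lookup_elem
 *	if r0 == 0 goto +1
 *	r0 += offsetof(struct htab_elem, key) + round_up(key_size, 8)
 *
 * i.e. a non-NULL htab_elem pointer is turned into a pointer to the value.
 */
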
static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
							void *key, const bool mark)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		if (mark)
			bpf_lru_node_set_ref(&l->lru_node);
		return l->key + round_up(map->key_size, 8);
	}

	return NULL;
}

static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
{
	return __htab_lru_map_lookup_elem(map, key, true);
}

static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
{
	return __htab_lru_map_lookup_elem(map, key, false);
}

static int htab_lru_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int ref_reg = BPF_REG_1;

	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
		     (void *(*)(struct bpf_map *map, void *key))NULL));
	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4);
	*insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret,
			      offsetof(struct htab_elem, lru_node) +
			      offsetof(struct bpf_lru_node, ref));
	*insn++ = BPF_JMP_IMM(BPF_JNE, ref_reg, 0, 1);
	*insn++ = BPF_ST_MEM(BPF_B, ret,
			     offsetof(struct htab_elem, lru_node) +
			     offsetof(struct bpf_lru_node, ref),
			     1);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	return insn - insn_buf;
}

static void check_and_free_fields(struct bpf_htab *htab,
				  struct htab_elem *elem)
{
	if (htab_is_percpu(htab)) {
		void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size);
		int cpu;

		for_each_possible_cpu(cpu)
			bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
	} else {
		void *map_value = elem->key + round_up(htab->map.key_size, 8);

		bpf_obj_free_fields(htab->map.record, map_value);
	}
}

/* It is called from the bpf_lru_list when the LRU needs to delete
 * older elements from the htab.
 */
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
{
	struct bpf_htab *htab = arg;
	struct htab_elem *l = NULL, *tgt_l;
	struct hlist_nulls_head *head;
	struct hlist_nulls_node *n;
	unsigned long flags;
	struct bucket *b;
	int ret;

	tgt_l = container_of(node, struct htab_elem, lru_node);
	b = __select_bucket(htab, tgt_l->hash);
	head = &b->head;

	ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags);
	if (ret)
		return false;

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l == tgt_l) {
			hlist_nulls_del_rcu(&l->hash_node);
			check_and_free_fields(htab, l);
			bpf_map_dec_elem_count(&htab->map);
			break;
		}

	htab_unlock_bucket(htab, b, tgt_l->hash, flags);

	return l == tgt_l;
}

/* Called from syscall */
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l, *next_l;
	u32 hash, key_size;
	int i = 0;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	if (!key)
		goto find_first_elem;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	head = select_bucket(htab, hash);

	/* lookup the key */
	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	if (!l)
		goto find_first_elem;

	/* key was found, get next key in the same bucket */
	next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
					struct htab_elem, hash_node);

	if (next_l) {
		/* if next elem in this hash list is non-zero, just return it */
		memcpy(next_key, next_l->key, key_size);
		return 0;
	}

	/* no more elements in this hash list, go to the next bucket */
	i = hash & (htab->n_buckets - 1);
	i++;

find_first_elem:
	/* iterate over buckets */
	for (; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		/* pick first element in the bucket */
		next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
						struct htab_elem, hash_node);
		if (next_l) {
			/* if it's not empty, just return it */
			memcpy(next_key, next_l->key, key_size);
			return 0;
		}
	}

	/* iterated over all buckets and all elements */
	return -ENOENT;
}

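/* Userspace iteration sketch (hypothetical fd; standard bpf syscall
 * pattern): passing NULL as the current key starts from the first bucket,
 * and the -ENOENT above terminates the walk:
 *
 *	__u32 key, next;
 *	int err = bpf_map_get_next_key(map_fd, NULL, &next);
 *	while (!err) {
 *		key = next;
 *		... use key ...
 *		err = bpf_map_get_next_key(map_fd, &key, &next);
 *	}
 */
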
static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
{
	check_and_free_fields(htab, l);
	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
		bpf_mem_cache_free(&htab->pcpu_ma, l->ptr_to_pptr);
	bpf_mem_cache_free(&htab->ma, l);
}

static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
{
	struct bpf_map *map = &htab->map;
	void *ptr;

	if (map->ops->map_fd_put_ptr) {
		ptr = fd_htab_map_get_ptr(map, l);
		map->ops->map_fd_put_ptr(ptr);
	}
}

static bool is_map_full(struct bpf_htab *htab)
{
	if (htab->use_percpu_counter)
		return __percpu_counter_compare(&htab->pcount, htab->map.max_entries,
						PERCPU_COUNTER_BATCH) >= 0;
	return atomic_read(&htab->count) >= htab->map.max_entries;
}

static void inc_elem_count(struct bpf_htab *htab)
{
	bpf_map_inc_elem_count(&htab->map);

	if (htab->use_percpu_counter)
		percpu_counter_add_batch(&htab->pcount, 1, PERCPU_COUNTER_BATCH);
	else
		atomic_inc(&htab->count);
}

static void dec_elem_count(struct bpf_htab *htab)
{
	bpf_map_dec_elem_count(&htab->map);

	if (htab->use_percpu_counter)
		percpu_counter_add_batch(&htab->pcount, -1, PERCPU_COUNTER_BATCH);
	else
		atomic_dec(&htab->count);
}

static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
	htab_put_fd_value(htab, l);

	if (htab_is_prealloc(htab)) {
		bpf_map_dec_elem_count(&htab->map);
		check_and_free_fields(htab, l);
		__pcpu_freelist_push(&htab->freelist, &l->fnode);
	} else {
		dec_elem_count(htab);
		htab_elem_free(htab, l);
	}
}

static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
			    void *value, bool onallcpus)
{
	if (!onallcpus) {
		/* copy true value_size bytes */
		copy_map_value(&htab->map, this_cpu_ptr(pptr), value);
	} else {
		u32 size = round_up(htab->map.value_size, 8);
		int off = 0, cpu;

		for_each_possible_cpu(cpu) {
			copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value + off);
			off += size;
		}
	}
}

static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr,
			    void *value, bool onallcpus)
{
	/* When not setting the initial value on all cpus, zero-fill element
	 * values for other cpus. Otherwise, the bpf program has no way to
	 * ensure known initial values for cpus other than the current one
	 * (onallcpus=false always when coming from a bpf prog).
	 */
	if (!onallcpus) {
		int current_cpu = raw_smp_processor_id();
		int cpu;

		for_each_possible_cpu(cpu) {
			if (cpu == current_cpu)
				copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value);
			else /* Since elem is preallocated, we cannot touch special fields */
				zero_map_value(&htab->map, per_cpu_ptr(pptr, cpu));
		}
	} else {
		pcpu_copy_value(htab, pptr, value, onallcpus);
	}
}

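/* Layout note (sketch): for a per-cpu map with value_size = 12, each
 * element carries num_possible_cpus() slots of round_up(12, 8) = 16 bytes;
 * the syscall-side copy helpers below walk those slots in possible-CPU
 * order, which is also the layout user space sees on lookup.
 */
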
static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
	       BITS_PER_LONG == 64;
}

static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
					 void *value, u32 key_size, u32 hash,
					 bool percpu, bool onallcpus,
					 struct htab_elem *old_elem)
{
	u32 size = htab->map.value_size;
	bool prealloc = htab_is_prealloc(htab);
	struct htab_elem *l_new, **pl_new;
	void __percpu *pptr;

	if (prealloc) {
		if (old_elem) {
			/* if we're updating the existing element,
			 * use per-cpu extra elems to avoid freelist_pop/push
			 */
			pl_new = this_cpu_ptr(htab->extra_elems);
			l_new = *pl_new;
			htab_put_fd_value(htab, old_elem);
			*pl_new = old_elem;
		} else {
			struct pcpu_freelist_node *l;

			l = __pcpu_freelist_pop(&htab->freelist);
			if (!l)
				return ERR_PTR(-E2BIG);
			l_new = container_of(l, struct htab_elem, fnode);
			bpf_map_inc_elem_count(&htab->map);
		}
	} else {
		if (is_map_full(htab))
			if (!old_elem)
				/* when map is full and update() is replacing
				 * old element, it's ok to allocate, since
				 * old element will be freed immediately.
				 * Otherwise return an error
				 */
				return ERR_PTR(-E2BIG);
		inc_elem_count(htab);
		l_new = bpf_mem_cache_alloc(&htab->ma);
		if (!l_new) {
			l_new = ERR_PTR(-ENOMEM);
			goto dec_count;
		}
	}

	memcpy(l_new->key, key, key_size);
	if (percpu) {
		if (prealloc) {
			pptr = htab_elem_get_ptr(l_new, key_size);
		} else {
			/* alloc_percpu zero-fills */
			pptr = bpf_mem_cache_alloc(&htab->pcpu_ma);
			if (!pptr) {
				bpf_mem_cache_free(&htab->ma, l_new);
				l_new = ERR_PTR(-ENOMEM);
				goto dec_count;
			}
			l_new->ptr_to_pptr = pptr;
			pptr = *(void **)pptr;
		}

		pcpu_init_value(htab, pptr, value, onallcpus);

		if (!prealloc)
			htab_elem_set_ptr(l_new, key_size, pptr);
	} else if (fd_htab_map_needs_adjust(htab)) {
		size = round_up(size, 8);
		memcpy(l_new->key + round_up(key_size, 8), value, size);
	} else {
		copy_map_value(&htab->map,
			       l_new->key + round_up(key_size, 8),
			       value);
	}

	l_new->hash = hash;
	return l_new;
dec_count:
	dec_elem_count(htab);
	return l_new;
}

static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
		       u64 map_flags)
{
	if (l_old && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!l_old && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}

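/* e.g. an update with BPF_NOEXIST on a present key fails with -EEXIST,
 * an update with BPF_EXIST on a missing key fails with -ENOENT, and
 * BPF_ANY (0) passes this check either way.
 */
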
/* Called from syscall or from eBPF program */
static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	b = __select_bucket(htab, hash);
	head = &b->head;

	if (unlikely(map_flags & BPF_F_LOCK)) {
		if (unlikely(!btf_record_has_field(map->record, BPF_SPIN_LOCK)))
			return -EINVAL;
		/* find an element without taking the bucket lock */
		l_old = lookup_nulls_elem_raw(head, hash, key, key_size,
					      htab->n_buckets);
		ret = check_flags(htab, l_old, map_flags);
		if (ret)
			return ret;
		if (l_old) {
			/* grab the element lock and update value in place */
			copy_map_value_locked(map,
					      l_old->key + round_up(key_size, 8),
					      value, false);
			return 0;
		}
		/* fall through, grab the bucket lock and lookup again.
		 * 99.9% chance that the element won't be found,
		 * but second lookup under lock has to be done.
		 */
	}

	ret = htab_lock_bucket(htab, b, hash, &flags);
	if (ret)
		return ret;

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (unlikely(l_old && (map_flags & BPF_F_LOCK))) {
		/* first lookup without the bucket lock didn't find the element,
		 * but second lookup with the bucket lock found it.
		 * This case is highly unlikely, but has to be dealt with:
		 * grab the element lock in addition to the bucket lock
		 * and update element in place
		 */
		copy_map_value_locked(map,
				      l_old->key + round_up(key_size, 8),
				      value, false);
		ret = 0;
		goto err;
	}

	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
				l_old);
	if (IS_ERR(l_new)) {
		/* all pre-allocated elements are in use or memory exhausted */
		ret = PTR_ERR(l_new);
		goto err;
	}

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		hlist_nulls_del_rcu(&l_old->hash_node);
		if (!htab_is_prealloc(htab))
			free_htab_elem(htab, l_old);
		else
			check_and_free_fields(htab, l_old);
	}
	ret = 0;
err:
	htab_unlock_bucket(htab, b, hash, flags);
	return ret;
}

static void htab_lru_push_free(struct bpf_htab *htab, struct htab_elem *elem)
{
	check_and_free_fields(htab, elem);
	bpf_map_dec_elem_count(&htab->map);
	bpf_lru_push_free(&htab->lru, &elem->lru_node);
}

static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
				     u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new, *l_old = NULL;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because getting free nodes from LRU may need
	 * to remove older elements from htab and this removal
	 * operation will need a bucket lock.
	 */
	l_new = prealloc_lru_pop(htab, key, hash);
	if (!l_new)
		return -ENOMEM;
	copy_map_value(&htab->map,
		       l_new->key + round_up(map->key_size, 8), value);

	ret = htab_lock_bucket(htab, b, hash, &flags);
	if (ret)
		goto err_lock_bucket;

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		bpf_lru_node_set_ref(&l_new->lru_node);
		hlist_nulls_del_rcu(&l_old->hash_node);
	}
	ret = 0;

err:
	htab_unlock_bucket(htab, b, hash, flags);

err_lock_bucket:
	if (ret)
		htab_lru_push_free(htab, l_new);
	else if (l_old)
		htab_lru_push_free(htab, l_old);

	return ret;
}

static long __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
					  void *value, u64 map_flags,
					  bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	b = __select_bucket(htab, hash);
	head = &b->head;

	ret = htab_lock_bucket(htab, b, hash, &flags);
	if (ret)
		return ret;

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		l_new = alloc_htab_elem(htab, key, value, key_size,
					hash, true, onallcpus, NULL);
		if (IS_ERR(l_new)) {
			ret = PTR_ERR(l_new);
			goto err;
		}
		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	}
	ret = 0;
err:
	htab_unlock_bucket(htab, b, hash, flags);
	return ret;
}

static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					      void *value, u64 map_flags,
					      bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because LRU's elem alloc may need
	 * to remove older elem from htab and this removal
	 * operation will need a bucket lock.
	 */
	if (map_flags != BPF_EXIST) {
		l_new = prealloc_lru_pop(htab, key, hash);
		if (!l_new)
			return -ENOMEM;
	}

	ret = htab_lock_bucket(htab, b, hash, &flags);
	if (ret)
		goto err_lock_bucket;

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		bpf_lru_node_set_ref(&l_old->lru_node);

		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size),
				value, onallcpus);
		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
		l_new = NULL;
	}
	ret = 0;
err:
	htab_unlock_bucket(htab, b, hash, flags);
err_lock_bucket:
	if (l_new) {
		bpf_map_dec_elem_count(&htab->map);
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	}
	return ret;
}

static long htab_percpu_map_update_elem(struct bpf_map *map, void *key,
					void *value, u64 map_flags)
{
	return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
}

static long htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					    void *value, u64 map_flags)
{
	return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
						 false);
}

/* Called from syscall or from eBPF program */
static long htab_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);
	b = __select_bucket(htab, hash);
	head = &b->head;

	ret = htab_lock_bucket(htab, b, hash, &flags);
	if (ret)
		return ret;

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_nulls_del_rcu(&l->hash_node);
		free_htab_elem(htab, l);
	} else {
		ret = -ENOENT;
	}

	htab_unlock_bucket(htab, b, hash, flags);
	return ret;
}

static long htab_lru_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);
	b = __select_bucket(htab, hash);
	head = &b->head;

	ret = htab_lock_bucket(htab, b, hash, &flags);
	if (ret)
		return ret;

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l)
		hlist_nulls_del_rcu(&l->hash_node);
	else
		ret = -ENOENT;

	htab_unlock_bucket(htab, b, hash, flags);
	if (l)
		htab_lru_push_free(htab, l);
	return ret;
}

static void delete_all_elements(struct bpf_htab *htab)
{
	int i;

	/* It's called from a worker thread, so disable migration here,
	 * since bpf_mem_cache_free() relies on that.
	 */
	migrate_disable();
	for (i = 0; i < htab->n_buckets; i++) {
		struct hlist_nulls_head *head = select_bucket(htab, i);
		struct hlist_nulls_node *n;
		struct htab_elem *l;

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			hlist_nulls_del_rcu(&l->hash_node);
			htab_elem_free(htab, l);
		}
		cond_resched();
	}
	migrate_enable();
}

static void htab_free_malloced_timers(struct bpf_htab *htab)
{
	int i;

	rcu_read_lock();
	for (i = 0; i < htab->n_buckets; i++) {
		struct hlist_nulls_head *head = select_bucket(htab, i);
		struct hlist_nulls_node *n;
		struct htab_elem *l;

		hlist_nulls_for_each_entry(l, n, head, hash_node) {
			/* We only free timer on uref dropping to zero */
			bpf_obj_free_timer(htab->map.record, l->key + round_up(htab->map.key_size, 8));
		}
		cond_resched_rcu();
	}
	rcu_read_unlock();
}

static void htab_map_free_timers(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);

	/* We only free timer on uref dropping to zero */
	if (!btf_record_has_field(htab->map.record, BPF_TIMER))
		return;
	if (!htab_is_prealloc(htab))
		htab_free_malloced_timers(htab);
	else
		htab_free_prealloced_timers(htab);
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	int i;

	/* bpf_free_used_maps() or close(map_fd) will trigger this map_free callback.
	 * bpf_free_used_maps() is called after bpf prog is no longer executing.
	 * There is no need to synchronize_rcu() here to protect map elements.
	 */

	/* htab no longer uses call_rcu() directly. bpf_mem_alloc does it
	 * underneath and is responsible for waiting for callbacks to finish
	 * during bpf_mem_alloc_destroy().
	 */
	if (!htab_is_prealloc(htab)) {
		delete_all_elements(htab);
	} else {
		htab_free_prealloced_fields(htab);
		prealloc_destroy(htab);
	}

	bpf_map_free_elem_count(map);
	free_percpu(htab->extra_elems);
	bpf_map_area_free(htab->buckets);
	bpf_mem_alloc_destroy(&htab->pcpu_ma);
	bpf_mem_alloc_destroy(&htab->ma);
	if (htab->use_percpu_counter)
		percpu_counter_destroy(&htab->pcount);
	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
		free_percpu(htab->map_locked[i]);
	lockdep_unregister_key(&htab->lockdep_key);
	bpf_map_area_free(htab);
}

static void htab_map_seq_show_elem(struct bpf_map *map, void *key,
				   struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = htab_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
	seq_puts(m, ": ");
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
					     void *value, bool is_lru_map,
					     bool is_percpu, u64 flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	unsigned long bflags;
	struct htab_elem *l;
	u32 hash, key_size;
	struct bucket *b;
	int ret;

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);
	b = __select_bucket(htab, hash);
	head = &b->head;

	ret = htab_lock_bucket(htab, b, hash, &bflags);
	if (ret)
		return ret;

	l = lookup_elem_raw(head, hash, key, key_size);
	if (!l) {
		ret = -ENOENT;
	} else {
		if (is_percpu) {
			u32 roundup_value_size = round_up(map->value_size, 8);
			void __percpu *pptr;
			int off = 0, cpu;

			pptr = htab_elem_get_ptr(l, key_size);
			for_each_possible_cpu(cpu) {
				copy_map_value_long(&htab->map, value + off, per_cpu_ptr(pptr, cpu));
				check_and_init_map_value(&htab->map, value + off);
				off += roundup_value_size;
			}
		} else {
			u32 roundup_key_size = round_up(map->key_size, 8);

			if (flags & BPF_F_LOCK)
				copy_map_value_locked(map, value, l->key +
						      roundup_key_size,
						      true);
			else
				copy_map_value(map, value, l->key +
					       roundup_key_size);
			/* Zeroing special fields in the temp buffer */
			check_and_init_map_value(map, value);
		}

		hlist_nulls_del_rcu(&l->hash_node);
		if (!is_lru_map)
			free_htab_elem(htab, l);
	}

	htab_unlock_bucket(htab, b, hash, bflags);

	if (is_lru_map && l)
		htab_lru_push_free(htab, l);

	return ret;
}

static int htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
					   void *value, u64 flags)
{
	return __htab_map_lookup_and_delete_elem(map, key, value, false, false,
						 flags);
}

static int htab_percpu_map_lookup_and_delete_elem(struct bpf_map *map,
						  void *key, void *value,
						  u64 flags)
{
	return __htab_map_lookup_and_delete_elem(map, key, value, false, true,
						 flags);
}

static int htab_lru_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
					       void *value, u64 flags)
{
	return __htab_map_lookup_and_delete_elem(map, key, value, true, false,
						 flags);
}

static int htab_lru_percpu_map_lookup_and_delete_elem(struct bpf_map *map,
						      void *key, void *value,
						      u64 flags)
{
	return __htab_map_lookup_and_delete_elem(map, key, value, true, true,
						 flags);
}

static int
__htab_map_lookup_and_delete_batch(struct bpf_map *map,
				   const union bpf_attr *attr,
				   union bpf_attr __user *uattr,
				   bool do_delete, bool is_lru_map,
				   bool is_percpu)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	u32 bucket_cnt, total, key_size, value_size, roundup_key_size;
	void *keys = NULL, *values = NULL, *value, *dst_key, *dst_val;
	void __user *uvalues = u64_to_user_ptr(attr->batch.values);
	void __user *ukeys = u64_to_user_ptr(attr->batch.keys);
	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
	u32 batch, max_count, size, bucket_size, map_id;
	struct htab_elem *node_to_free = NULL;
	u64 elem_map_flags, map_flags;
	struct hlist_nulls_head *head;
	struct hlist_nulls_node *n;
	unsigned long flags = 0;
	bool locked = false;
	struct htab_elem *l;
	struct bucket *b;
	int ret = 0;

	elem_map_flags = attr->batch.elem_flags;
	if ((elem_map_flags & ~BPF_F_LOCK) ||
	    ((elem_map_flags & BPF_F_LOCK) && !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
		return -EINVAL;

	map_flags = attr->batch.flags;
	if (map_flags)
		return -EINVAL;

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	batch = 0;
	if (ubatch && copy_from_user(&batch, ubatch, sizeof(batch)))
		return -EFAULT;

	if (batch >= htab->n_buckets)
		return -ENOENT;

	key_size = htab->map.key_size;
	roundup_key_size = round_up(htab->map.key_size, 8);
	value_size = htab->map.value_size;
	size = round_up(value_size, 8);
	if (is_percpu)
		value_size = size * num_possible_cpus();
	total = 0;
	/* while experimenting with hash tables with sizes ranging from 10 to
	 * 1000, it was observed that a bucket can have up to 5 entries.
	 */
	bucket_size = 5;

alloc:
	/* We cannot do copy_from_user or copy_to_user inside
	 * the rcu_read_lock. Allocate enough space here.
	 */
	keys = kvmalloc_array(key_size, bucket_size, GFP_USER | __GFP_NOWARN);
	values = kvmalloc_array(value_size, bucket_size, GFP_USER | __GFP_NOWARN);
	if (!keys || !values) {
		ret = -ENOMEM;
		goto after_loop;
	}

again:
	bpf_disable_instrumentation();
	rcu_read_lock();
again_nocopy:
	dst_key = keys;
	dst_val = values;
	b = &htab->buckets[batch];
	head = &b->head;
	/* do not grab the lock unless we need it (bucket_cnt > 0). */
	if (locked) {
		ret = htab_lock_bucket(htab, b, batch, &flags);
		if (ret) {
			rcu_read_unlock();
			bpf_enable_instrumentation();
			goto after_loop;
		}
	}

	bucket_cnt = 0;
	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		bucket_cnt++;

	if (bucket_cnt && !locked) {
		locked = true;
		goto again_nocopy;
	}

	if (bucket_cnt > (max_count - total)) {
		if (total == 0)
			ret = -ENOSPC;
		/* Note that since bucket_cnt > 0 here, it is implicit
		 * that the lock was grabbed, so release it.
		 */
		htab_unlock_bucket(htab, b, batch, flags);
		rcu_read_unlock();
		bpf_enable_instrumentation();
		goto after_loop;
	}

	if (bucket_cnt > bucket_size) {
		bucket_size = bucket_cnt;
		/* Note that since bucket_cnt > 0 here, it is implicit
		 * that the lock was grabbed, so release it.
		 */
		htab_unlock_bucket(htab, b, batch, flags);
		rcu_read_unlock();
		bpf_enable_instrumentation();
		kvfree(keys);
		kvfree(values);
		goto alloc;
	}

	/* Next block is only safe to run if you have grabbed the lock */
	if (!locked)
		goto next_batch;

	hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
		memcpy(dst_key, l->key, key_size);

		if (is_percpu) {
			int off = 0, cpu;
			void __percpu *pptr;

			pptr = htab_elem_get_ptr(l, map->key_size);
			for_each_possible_cpu(cpu) {
				copy_map_value_long(&htab->map, dst_val + off, per_cpu_ptr(pptr, cpu));
				check_and_init_map_value(&htab->map, dst_val + off);
				off += size;
			}
		} else {
			value = l->key + roundup_key_size;
			if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
				struct bpf_map **inner_map = value;

				/* Actual value is the id of the inner map */
				map_id = map->ops->map_fd_sys_lookup_elem(*inner_map);
				value = &map_id;
			}

			if (elem_map_flags & BPF_F_LOCK)
				copy_map_value_locked(map, dst_val, value,
						      true);
			else
				copy_map_value(map, dst_val, value);
			/* Zeroing special fields in the temp buffer */
			check_and_init_map_value(map, dst_val);
		}
		if (do_delete) {
			hlist_nulls_del_rcu(&l->hash_node);

			/* bpf_lru_push_free() will acquire lru_lock, which
			 * may cause deadlock. See comments in function
			 * prealloc_lru_pop(). Let us do bpf_lru_push_free()
			 * after releasing the bucket lock.
			 */
			if (is_lru_map) {
				l->batch_flink = node_to_free;
				node_to_free = l;
			} else {
				free_htab_elem(htab, l);
			}
		}
		dst_key += key_size;
		dst_val += value_size;
	}

	htab_unlock_bucket(htab, b, batch, flags);
	locked = false;

	while (node_to_free) {
		l = node_to_free;
		node_to_free = node_to_free->batch_flink;
		htab_lru_push_free(htab, l);
	}

next_batch:
	/* If we are not copying data, we can go to the next bucket and avoid
	 * unlocking the rcu.
	 */
	if (!bucket_cnt && (batch + 1 < htab->n_buckets)) {
		batch++;
		goto again_nocopy;
	}

	rcu_read_unlock();
	bpf_enable_instrumentation();
	if (bucket_cnt && (copy_to_user(ukeys + total * key_size, keys,
	    key_size * bucket_cnt) ||
	    copy_to_user(uvalues + total * value_size, values,
	    value_size * bucket_cnt))) {
		ret = -EFAULT;
		goto after_loop;
	}

	total += bucket_cnt;
	batch++;
	if (batch >= htab->n_buckets) {
		ret = -ENOENT;
		goto after_loop;
	}
	goto again;

after_loop:
	if (ret == -EFAULT)
		goto out;

	/* copy # of entries and next batch */
	ubatch = u64_to_user_ptr(attr->batch.out_batch);
	if (copy_to_user(ubatch, &batch, sizeof(batch)) ||
	    put_user(total, &uattr->batch.count))
		ret = -EFAULT;

out:
	kvfree(keys);
	kvfree(values);
	return ret;
}

static int
htab_percpu_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
						  false, true);
}

static int
htab_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
					const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
						  false, true);
}

static int
htab_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
		      union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
						  false, false);
}

static int
htab_map_lookup_and_delete_batch(struct bpf_map *map,
				 const union bpf_attr *attr,
				 union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
						  false, false);
}

static int
htab_lru_percpu_map_lookup_batch(struct bpf_map *map,
				 const union bpf_attr *attr,
				 union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
						  true, true);
}

static int
htab_lru_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
					    const union bpf_attr *attr,
					    union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
						  true, true);
}

static int
htab_lru_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
						  true, false);
}

static int
htab_lru_map_lookup_and_delete_batch(struct bpf_map *map,
				     const union bpf_attr *attr,
				     union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
						  true, false);
}

struct bpf_iter_seq_hash_map_info {
	struct bpf_map *map;
	struct bpf_htab *htab;
	void *percpu_value_buf; /* non-zero means percpu hash */
	u32 bucket_id;
	u32 skip_elems;
};

static struct htab_elem *
bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info,
			   struct htab_elem *prev_elem)
{
	const struct bpf_htab *htab = info->htab;
	u32 skip_elems = info->skip_elems;
	u32 bucket_id = info->bucket_id;
	struct hlist_nulls_head *head;
	struct hlist_nulls_node *n;
	struct htab_elem *elem;
	struct bucket *b;
	u32 i, count;

	if (bucket_id >= htab->n_buckets)
		return NULL;

	/* try to find next elem in the same bucket */
	if (prev_elem) {
		/* no update/deletion on this bucket, prev_elem should be still valid
		 * and we won't skip elements.
		 */
		n = rcu_dereference_raw(hlist_nulls_next_rcu(&prev_elem->hash_node));
		elem = hlist_nulls_entry_safe(n, struct htab_elem, hash_node);
		if (elem)
			return elem;

		/* not found, unlock and go to the next bucket */
		b = &htab->buckets[bucket_id++];
		rcu_read_unlock();
		skip_elems = 0;
	}

	for (i = bucket_id; i < htab->n_buckets; i++) {
		b = &htab->buckets[i];
		rcu_read_lock();

		count = 0;
		head = &b->head;
		hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
			if (count >= skip_elems) {
				info->bucket_id = i;
				info->skip_elems = count;
				return elem;
			}
			count++;
		}

		rcu_read_unlock();
		skip_elems = 0;
	}

	info->bucket_id = i;
	info->skip_elems = 0;
	return NULL;
}

static void *bpf_hash_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_hash_map_info *info = seq->private;
	struct htab_elem *elem;

	elem = bpf_hash_map_seq_find_next(info, NULL);
	if (!elem)
		return NULL;

	if (*pos == 0)
		++*pos;
	return elem;
}

static void *bpf_hash_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_hash_map_info *info = seq->private;

	++*pos;
	++info->skip_elems;
	return bpf_hash_map_seq_find_next(info, v);
}

static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem)
{
	struct bpf_iter_seq_hash_map_info *info = seq->private;
	u32 roundup_key_size, roundup_value_size;
	struct bpf_iter__bpf_map_elem ctx = {};
	struct bpf_map *map = info->map;
	struct bpf_iter_meta meta;
	int ret = 0, off = 0, cpu;
	struct bpf_prog *prog;
	void __percpu *pptr;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, elem == NULL);
	if (prog) {
		ctx.meta = &meta;
		ctx.map = info->map;
		if (elem) {
			roundup_key_size = round_up(map->key_size, 8);
			ctx.key = elem->key;
			if (!info->percpu_value_buf) {
				ctx.value = elem->key + roundup_key_size;
			} else {
				roundup_value_size = round_up(map->value_size, 8);
				pptr = htab_elem_get_ptr(elem, map->key_size);
				for_each_possible_cpu(cpu) {
					copy_map_value_long(map, info->percpu_value_buf + off,
							    per_cpu_ptr(pptr, cpu));
					check_and_init_map_value(map, info->percpu_value_buf + off);
					off += roundup_value_size;
				}
				ctx.value = info->percpu_value_buf;
			}
		}
		ret = bpf_iter_run_prog(prog, &ctx);
	}

	return ret;
}

static int bpf_hash_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_hash_map_seq_show(seq, v);
}

static void bpf_hash_map_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__bpf_hash_map_seq_show(seq, NULL);
	else
		rcu_read_unlock();
}

static int bpf_iter_init_hash_map(void *priv_data,
				  struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
	struct bpf_map *map = aux->map;
	void *value_buf;
	u32 buf_size;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		buf_size = round_up(map->value_size, 8) * num_possible_cpus();
		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
		if (!value_buf)
			return -ENOMEM;

		seq_info->percpu_value_buf = value_buf;
	}

	bpf_map_inc_with_uref(map);
	seq_info->map = map;
	seq_info->htab = container_of(map, struct bpf_htab, map);
	return 0;
}

static void bpf_iter_fini_hash_map(void *priv_data)
{
	struct bpf_iter_seq_hash_map_info *seq_info = priv_data;

	bpf_map_put_with_uref(seq_info->map);
	kfree(seq_info->percpu_value_buf);
}

static const struct seq_operations bpf_hash_map_seq_ops = {
	.start	= bpf_hash_map_seq_start,
	.next	= bpf_hash_map_seq_next,
	.stop	= bpf_hash_map_seq_stop,
	.show	= bpf_hash_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops		= &bpf_hash_map_seq_ops,
	.init_seq_private	= bpf_iter_init_hash_map,
	.fini_seq_private	= bpf_iter_fini_hash_map,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_hash_map_info),
};

static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_fn,
				   void *callback_ctx, u64 flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct hlist_nulls_node *n;
	struct htab_elem *elem;
	u32 roundup_key_size;
	int i, num_elems = 0;
	void __percpu *pptr;
	struct bucket *b;
	void *key, *val;
	bool is_percpu;
	u64 ret = 0;

	if (flags != 0)
		return -EINVAL;

	is_percpu = htab_is_percpu(htab);

	roundup_key_size = round_up(map->key_size, 8);
	/* disable migration so percpu value prepared here will be the
	 * same as the one seen by the bpf program with bpf_map_lookup_elem().
	 */
	migrate_disable();
	for (i = 0; i < htab->n_buckets; i++) {
		b = &htab->buckets[i];
		rcu_read_lock();
		head = &b->head;
		hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
			key = elem->key;
			if (is_percpu) {
				/* current cpu value for percpu map */
				pptr = htab_elem_get_ptr(elem, map->key_size);
				val = this_cpu_ptr(pptr);
			} else {
				val = elem->key + roundup_key_size;
			}
			num_elems++;
			ret = callback_fn((u64)(long)map, (u64)(long)key,
					  (u64)(long)val, (u64)(long)callback_ctx, 0);
			/* return value: 0 - continue, 1 - stop and return */
			if (ret) {
				rcu_read_unlock();
				goto out;
			}
		}
		rcu_read_unlock();
	}
out:
	migrate_enable();
	return num_elems;
}

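/* From a BPF program this is reached through the bpf_for_each_map_elem()
 * helper (sketch; callback and ctx names are made up):
 *
 *	static long cb(struct bpf_map *map, u32 *key, u64 *val, void *ctx)
 *	{
 *		return 0;	// 0 = continue, 1 = stop iterating
 *	}
 *	...
 *	bpf_for_each_map_elem(&my_map, cb, &my_ctx, 0);
 */
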
static u64 htab_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	u32 value_size = round_up(htab->map.value_size, 8);
	bool prealloc = htab_is_prealloc(htab);
	bool percpu = htab_is_percpu(htab);
	bool lru = htab_is_lru(htab);
	u64 num_entries;
	u64 usage = sizeof(struct bpf_htab);

	usage += sizeof(struct bucket) * htab->n_buckets;
	usage += sizeof(int) * num_possible_cpus() * HASHTAB_MAP_LOCK_COUNT;
	if (prealloc) {
		num_entries = map->max_entries;
		if (htab_has_extra_elems(htab))
			num_entries += num_possible_cpus();

		usage += htab->elem_size * num_entries;

		if (percpu)
			usage += value_size * num_possible_cpus() * num_entries;
		else if (!lru)
			usage += sizeof(struct htab_elem *) * num_possible_cpus();
	} else {
#define LLIST_NODE_SZ sizeof(struct llist_node)

		num_entries = htab->use_percpu_counter ?
				  percpu_counter_sum(&htab->pcount) :
				  atomic_read(&htab->count);
		usage += (htab->elem_size + LLIST_NODE_SZ) * num_entries;
		if (percpu) {
			usage += (LLIST_NODE_SZ + sizeof(void *)) * num_entries;
			usage += value_size * num_possible_cpus() * num_entries;
		}
	}
	return usage;
}

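/* Worked example (sketch): a preallocated, non-percpu, non-LRU map with
 * max_entries = 1024 and elem_size = 64 on an 8-CPU box charges
 *
 *	sizeof(struct bpf_htab)
 *	+ 1024 * sizeof(struct bucket)		// n_buckets rounds up to 1024
 *	+ sizeof(int) * 8 * HASHTAB_MAP_LOCK_COUNT
 *	+ 64 * (1024 + 8)			// incl. per-CPU extra elems
 *	+ sizeof(struct htab_elem *) * 8	// the extra_elems array
 */
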
BTF_ID_LIST_SINGLE(htab_map_btf_ids, struct, bpf_htab)
const struct bpf_map_ops htab_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_release_uref = htab_map_free_timers,
	.map_lookup_elem = htab_map_lookup_elem,
	.map_lookup_and_delete_elem = htab_map_lookup_and_delete_elem,
	.map_update_elem = htab_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_gen_lookup = htab_map_gen_lookup,
	.map_seq_show_elem = htab_map_seq_show_elem,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_hash_elem,
	.map_mem_usage = htab_map_mem_usage,
	BATCH_OPS(htab),
	.map_btf_id = &htab_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

const struct bpf_map_ops htab_lru_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_release_uref = htab_map_free_timers,
	.map_lookup_elem = htab_lru_map_lookup_elem,
	.map_lookup_and_delete_elem = htab_lru_map_lookup_and_delete_elem,
	.map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
	.map_update_elem = htab_lru_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
	.map_gen_lookup = htab_lru_map_gen_lookup,
	.map_seq_show_elem = htab_map_seq_show_elem,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_hash_elem,
	.map_mem_usage = htab_map_mem_usage,
	BATCH_OPS(htab_lru),
	.map_btf_id = &htab_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

/* Called from eBPF program */
static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	else
		return NULL;
}

static void *htab_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{
	struct htab_elem *l;

	if (cpu >= nr_cpu_ids)
		return NULL;

	l = __htab_map_lookup_elem(map, key);
	if (l)
		return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
	else
		return NULL;
}

static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	}

	return NULL;
}

static void *htab_lru_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{
	struct htab_elem *l;

	if (cpu >= nr_cpu_ids)
		return NULL;

	l = __htab_map_lookup_elem(map, key);
	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
	}

	return NULL;
}

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
{
	struct htab_elem *l;
	void __percpu *pptr;
	int ret = -ENOENT;
	int cpu, off = 0;
	u32 size;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	l = __htab_map_lookup_elem(map, key);
	if (!l)
		goto out;
	/* We do not mark LRU map element here in order to not mess up
	 * eviction heuristics when user space does a map walk.
	 */
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
		check_and_init_map_value(map, value + off);
		off += size;
	}
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

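/* Userspace sketch (hypothetical fd and key): one lookup on a per-cpu hash
 * returns every CPU's slot at once:
 *
 *	int n = libbpf_num_possible_cpus();
 *	__u64 vals[n];
 *	bpf_map_lookup_elem(map_fd, &key, vals);	// vals[cpu] per CPU
 */
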
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	int ret;

	rcu_read_lock();
	if (htab_is_lru(htab))
		ret = __htab_lru_percpu_map_update_elem(map, key, value,
							map_flags, true);
	else
		ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
						    true);
	rcu_read_unlock();

	return ret;
}

static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
					  struct seq_file *m)
{
	struct htab_elem *l;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	l = __htab_map_lookup_elem(map, key);
	if (!l) {
		rcu_read_unlock();
		return;
	}

	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
	seq_puts(m, ": {\n");
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

const struct bpf_map_ops htab_percpu_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_percpu_map_lookup_elem,
	.map_lookup_and_delete_elem = htab_percpu_map_lookup_and_delete_elem,
	.map_update_elem = htab_percpu_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_lookup_percpu_elem = htab_percpu_map_lookup_percpu_elem,
	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_hash_elem,
	.map_mem_usage = htab_map_mem_usage,
	BATCH_OPS(htab_percpu),
	.map_btf_id = &htab_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

const struct bpf_map_ops htab_lru_percpu_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
	.map_lookup_and_delete_elem = htab_lru_percpu_map_lookup_and_delete_elem,
	.map_update_elem = htab_lru_percpu_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
	.map_lookup_percpu_elem = htab_lru_percpu_map_lookup_percpu_elem,
	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_hash_elem,
	.map_mem_usage = htab_map_mem_usage,
	BATCH_OPS(htab_lru_percpu),
	.map_btf_id = &htab_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

static int fd_htab_map_alloc_check(union bpf_attr *attr)
{
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	return htab_map_alloc_check(attr);
}

static void fd_htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_node *n;
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			void *ptr = fd_htab_map_get_ptr(map, l);

			map->ops->map_fd_put_ptr(ptr);
		}
	}

	htab_map_free(map);
}

2495 int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
2500 if (!map->ops->map_fd_sys_lookup_elem)
2504 ptr = htab_map_lookup_elem(map, key);
2506 *value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr));
/* only called from syscall */
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags)
{
	void *ptr;
	int ret;
	u32 ufd = *(u32 *)value;

	ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ret = htab_map_update_elem(map, key, &ptr, map_flags);
	if (ret)
		map->ops->map_fd_put_ptr(ptr);

	return ret;
}

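/* Userspace sketch (hypothetical fds): for BPF_MAP_TYPE_HASH_OF_MAPS the
 * update value is an inner map fd, while syscall lookups report the inner
 * map id (see bpf_fd_htab_map_lookup_elem() above):
 *
 *	int inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 4, 1, NULL);
 *	bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_ANY);
 */
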
static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = htab_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = htab_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static int htab_of_map_gen_lookup(struct bpf_map *map,
				  struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
		     (void *(*)(struct bpf_map *map, void *key))NULL));
	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);

	return insn - insn_buf;
}

static void htab_of_map_free(struct bpf_map *map)
{
	bpf_map_meta_free(map->inner_map_meta);
	fd_htab_map_free(map);
}

const struct bpf_map_ops htab_of_maps_map_ops = {
	.map_alloc_check = fd_htab_map_alloc_check,
	.map_alloc = htab_of_map_alloc,
	.map_free = htab_of_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_of_map_lookup_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = htab_of_map_gen_lookup,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = htab_map_mem_usage,
	BATCH_OPS(htab),
	.map_btf_id = &htab_map_btf_ids[0],
};