bpf: Eliminate rlimit-based memory accounting for hashtab maps
kernel/bpf/hashtab.c (linux-2.6-microblaze.git)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  * Copyright (c) 2016 Facebook
4  */
5 #include <linux/bpf.h>
6 #include <linux/btf.h>
7 #include <linux/jhash.h>
8 #include <linux/filter.h>
9 #include <linux/rculist_nulls.h>
10 #include <linux/random.h>
11 #include <uapi/linux/btf.h>
12 #include <linux/rcupdate_trace.h>
13 #include "percpu_freelist.h"
14 #include "bpf_lru_list.h"
15 #include "map_in_map.h"
16
17 #define HTAB_CREATE_FLAG_MASK                                           \
18         (BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE |    \
19          BPF_F_ACCESS_MASK | BPF_F_ZERO_SEED)
20
21 #define BATCH_OPS(_name)                        \
22         .map_lookup_batch =                     \
23         _name##_map_lookup_batch,               \
24         .map_lookup_and_delete_batch =          \
25         _name##_map_lookup_and_delete_batch,    \
26         .map_update_batch =                     \
27         generic_map_update_batch,               \
28         .map_delete_batch =                     \
29         generic_map_delete_batch
30
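/*
 * Illustrative expansion of BATCH_OPS() for _name == htab (a sketch only;
 * the real use sites are the map ops definitions further down in this file):
 *
 *      .map_lookup_batch = htab_map_lookup_batch,
 *      .map_lookup_and_delete_batch = htab_map_lookup_and_delete_batch,
 *      .map_update_batch = generic_map_update_batch,
 *      .map_delete_batch = generic_map_delete_batch
 */
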
31 /*
32  * The bucket lock has two protection scopes:
33  *
34  * 1) Serializing concurrent operations from BPF programs on different
35  *    CPUs
36  *
37  * 2) Serializing concurrent operations from BPF programs and sys_bpf()
38  *
39  * BPF programs can execute in any context including perf, kprobes and
40  * tracing. As there are almost no limits on where perf, kprobes and
41  * tracing can be invoked from, the lock operations need to be protected
42  * against deadlocks. Deadlocks can be caused by recursion and by an
43  * invocation in the lock held section when functions which acquire this
44  * lock are invoked from sys_bpf(). BPF recursion is prevented by
45  * incrementing the per CPU variable bpf_prog_active, which prevents BPF
46  * programs attached to perf events, kprobes and tracing from being invoked
47  * before the prior invocation from one of these contexts has completed.
48  * sys_bpf() uses the same mechanism by pinning the task to the current CPU
49  * and incrementing the recursion protection across the map operation.
50  *
51  * This has subtle implications on PREEMPT_RT. PREEMPT_RT forbids certain
52  * operations like memory allocations (even with GFP_ATOMIC) from atomic
53  * contexts. This is required because even with GFP_ATOMIC the memory
54  * allocator calls into code paths which acquire locks with long held lock
55  * sections. To ensure deterministic behaviour, these locks are regular
56  * spinlocks, which are converted to 'sleepable' spinlocks on RT. The only
57  * true atomic contexts on an RT kernel are the low level hardware
58  * handling, scheduling, low level interrupt handling, NMIs etc. None of
59  * these contexts should ever do memory allocations.
60  *
61  * As regular device interrupt handlers and soft interrupts are forced into
62  * thread context, the existing code which does
63  *   spin_lock*(); alloc(GFP_ATOMIC); spin_unlock*();
64  * just works.
65  *
66  * In theory the BPF locks could be converted to regular spinlocks as well,
67  * but the bucket locks and percpu_freelist locks can be taken from
68  * arbitrary contexts (perf, kprobes, tracepoints) which are required to be
69  * atomic contexts even on RT. These mechanisms require preallocated maps,
70  * so there is no need to invoke memory allocations within the lock held
71  * sections.
72  *
73  * BPF maps which need dynamic allocation are only used from (forced)
74  * thread context on RT and can therefore use regular spinlocks, which in
75  * turn allows memory allocations from within the lock held section.
76  *
77  * On a non-RT kernel this distinction is neither possible nor required.
78  * spinlock maps to raw_spinlock and the extra code is optimized out by the
79  * compiler.
80  */
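
/*
 * Condensed sketch of the resulting rule (see htab_use_raw_lock() and
 * htab_lock_bucket() below; this is not a separate implementation):
 *
 *      if (!IS_ENABLED(CONFIG_PREEMPT_RT) || htab_is_prealloc(htab))
 *              raw_spin_lock_irqsave(&b->raw_lock, flags);
 *      else
 *              spin_lock_irqsave(&b->lock, flags);
 *
 * i.e. only maps which never allocate under the bucket lock (preallocated
 * ones) keep the raw, truly atomic lock on PREEMPT_RT.
 */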
81 struct bucket {
82         struct hlist_nulls_head head;
83         union {
84                 raw_spinlock_t raw_lock;
85                 spinlock_t     lock;
86         };
87 };
88
89 #define HASHTAB_MAP_LOCK_COUNT 8
90 #define HASHTAB_MAP_LOCK_MASK (HASHTAB_MAP_LOCK_COUNT - 1)
91
92 struct bpf_htab {
93         struct bpf_map map;
94         struct bucket *buckets;
95         void *elems;
96         union {
97                 struct pcpu_freelist freelist;
98                 struct bpf_lru lru;
99         };
100         struct htab_elem *__percpu *extra_elems;
101         atomic_t count; /* number of elements in this hashtable */
102         u32 n_buckets;  /* number of hash buckets */
103         u32 elem_size;  /* size of each element in bytes */
104         u32 hashrnd;
105         struct lock_class_key lockdep_key;
106         int __percpu *map_locked[HASHTAB_MAP_LOCK_COUNT];
107 };
108
109 /* each htab element is struct htab_elem + key + value */
110 struct htab_elem {
111         union {
112                 struct hlist_nulls_node hash_node;
113                 struct {
114                         void *padding;
115                         union {
116                                 struct bpf_htab *htab;
117                                 struct pcpu_freelist_node fnode;
118                                 struct htab_elem *batch_flink;
119                         };
120                 };
121         };
122         union {
123                 struct rcu_head rcu;
124                 struct bpf_lru_node lru_node;
125         };
126         u32 hash;
127         char key[] __aligned(8);
128 };
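
/*
 * Layout sketch implied by the comment above, assuming an 8-byte aligned
 * key as used throughout this file:
 *
 *      value (or percpu pointer) starts at l->key + round_up(key_size, 8)
 *      elem_size = sizeof(struct htab_elem) + round_up(key_size, 8) +
 *                  round_up(value_size, 8)      (non-percpu maps)
 */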
129
130 static inline bool htab_is_prealloc(const struct bpf_htab *htab)
131 {
132         return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
133 }
134
135 static inline bool htab_use_raw_lock(const struct bpf_htab *htab)
136 {
137         return (!IS_ENABLED(CONFIG_PREEMPT_RT) || htab_is_prealloc(htab));
138 }
139
140 static void htab_init_buckets(struct bpf_htab *htab)
141 {
142         unsigned i;
143
144         for (i = 0; i < htab->n_buckets; i++) {
145                 INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
146                 if (htab_use_raw_lock(htab)) {
147                         raw_spin_lock_init(&htab->buckets[i].raw_lock);
148                         lockdep_set_class(&htab->buckets[i].raw_lock,
149                                           &htab->lockdep_key);
150                 } else {
151                         spin_lock_init(&htab->buckets[i].lock);
152                         lockdep_set_class(&htab->buckets[i].lock,
153                                           &htab->lockdep_key);
154                 }
155         }
156 }
157
158 static inline int htab_lock_bucket(const struct bpf_htab *htab,
159                                    struct bucket *b, u32 hash,
160                                    unsigned long *pflags)
161 {
162         unsigned long flags;
163
164         hash = hash & HASHTAB_MAP_LOCK_MASK;
165
166         migrate_disable();
167         if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
168                 __this_cpu_dec(*(htab->map_locked[hash]));
169                 migrate_enable();
170                 return -EBUSY;
171         }
172
173         if (htab_use_raw_lock(htab))
174                 raw_spin_lock_irqsave(&b->raw_lock, flags);
175         else
176                 spin_lock_irqsave(&b->lock, flags);
177         *pflags = flags;
178
179         return 0;
180 }
181
182 static inline void htab_unlock_bucket(const struct bpf_htab *htab,
183                                       struct bucket *b, u32 hash,
184                                       unsigned long flags)
185 {
186         hash = hash & HASHTAB_MAP_LOCK_MASK;
187         if (htab_use_raw_lock(htab))
188                 raw_spin_unlock_irqrestore(&b->raw_lock, flags);
189         else
190                 spin_unlock_irqrestore(&b->lock, flags);
191         __this_cpu_dec(*(htab->map_locked[hash]));
192         migrate_enable();
193 }
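
/*
 * The map_locked counters above implement the recursion protection for this
 * map: each CPU has HASHTAB_MAP_LOCK_COUNT counters per map, indexed by
 * hash & HASHTAB_MAP_LOCK_MASK. If an operation on the same CPU and the same
 * counter is already in flight, htab_lock_bucket() bails out with -EBUSY
 * instead of deadlocking on the bucket lock.
 */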
194
195 static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);
196
197 static bool htab_is_lru(const struct bpf_htab *htab)
198 {
199         return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
200                 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
201 }
202
203 static bool htab_is_percpu(const struct bpf_htab *htab)
204 {
205         return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
206                 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
207 }
208
209 static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
210                                      void __percpu *pptr)
211 {
212         *(void __percpu **)(l->key + key_size) = pptr;
213 }
214
215 static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
216 {
217         return *(void __percpu **)(l->key + key_size);
218 }
219
220 static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
221 {
222         return *(void **)(l->key + roundup(map->key_size, 8));
223 }
224
225 static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
226 {
227         return (struct htab_elem *) (htab->elems + i * htab->elem_size);
228 }
229
230 static void htab_free_elems(struct bpf_htab *htab)
231 {
232         int i;
233
234         if (!htab_is_percpu(htab))
235                 goto free_elems;
236
237         for (i = 0; i < htab->map.max_entries; i++) {
238                 void __percpu *pptr;
239
240                 pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
241                                          htab->map.key_size);
242                 free_percpu(pptr);
243                 cond_resched();
244         }
245 free_elems:
246         bpf_map_area_free(htab->elems);
247 }
248
249 /* The LRU list has a lock (lru_lock). Each htab bucket has a lock
250  * (bucket_lock). If both locks need to be acquired together, the lock
251  * order is always lru_lock -> bucket_lock and this only happens in
252  * bpf_lru_list.c logic. For example, certain code path of
253  * bpf_lru_pop_free(), which is called by function prealloc_lru_pop(),
254  * will acquire lru_lock first followed by acquiring bucket_lock.
255  *
256  * In hashtab.c, to avoid deadlock, lock acquisition of
257  * bucket_lock followed by lru_lock is not allowed. In such cases,
258  * bucket_lock needs to be released first before acquiring lru_lock.
259  */
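/*
 * In short (a sketch of the rule above):
 *
 *      allowed:   lru_lock -> bucket_lock     (bpf_lru_list.c paths)
 *      forbidden: bucket_lock -> lru_lock     (would deadlock)
 *
 * which is why e.g. htab_lru_map_delete_elem() below releases the bucket
 * lock before calling bpf_lru_push_free().
 */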
260 static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
261                                           u32 hash)
262 {
263         struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
264         struct htab_elem *l;
265
266         if (node) {
267                 l = container_of(node, struct htab_elem, lru_node);
268                 memcpy(l->key, key, htab->map.key_size);
269                 return l;
270         }
271
272         return NULL;
273 }
274
275 static int prealloc_init(struct bpf_htab *htab)
276 {
277         u32 num_entries = htab->map.max_entries;
278         int err = -ENOMEM, i;
279
280         if (!htab_is_percpu(htab) && !htab_is_lru(htab))
281                 num_entries += num_possible_cpus();
282
283         htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries,
284                                          htab->map.numa_node);
285         if (!htab->elems)
286                 return -ENOMEM;
287
288         if (!htab_is_percpu(htab))
289                 goto skip_percpu_elems;
290
291         for (i = 0; i < num_entries; i++) {
292                 u32 size = round_up(htab->map.value_size, 8);
293                 void __percpu *pptr;
294
295                 pptr = bpf_map_alloc_percpu(&htab->map, size, 8,
296                                             GFP_USER | __GFP_NOWARN);
297                 if (!pptr)
298                         goto free_elems;
299                 htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
300                                   pptr);
301                 cond_resched();
302         }
303
304 skip_percpu_elems:
305         if (htab_is_lru(htab))
306                 err = bpf_lru_init(&htab->lru,
307                                    htab->map.map_flags & BPF_F_NO_COMMON_LRU,
308                                    offsetof(struct htab_elem, hash) -
309                                    offsetof(struct htab_elem, lru_node),
310                                    htab_lru_map_delete_node,
311                                    htab);
312         else
313                 err = pcpu_freelist_init(&htab->freelist);
314
315         if (err)
316                 goto free_elems;
317
318         if (htab_is_lru(htab))
319                 bpf_lru_populate(&htab->lru, htab->elems,
320                                  offsetof(struct htab_elem, lru_node),
321                                  htab->elem_size, num_entries);
322         else
323                 pcpu_freelist_populate(&htab->freelist,
324                                        htab->elems + offsetof(struct htab_elem, fnode),
325                                        htab->elem_size, num_entries);
326
327         return 0;
328
329 free_elems:
330         htab_free_elems(htab);
331         return err;
332 }
333
334 static void prealloc_destroy(struct bpf_htab *htab)
335 {
336         htab_free_elems(htab);
337
338         if (htab_is_lru(htab))
339                 bpf_lru_destroy(&htab->lru);
340         else
341                 pcpu_freelist_destroy(&htab->freelist);
342 }
343
344 static int alloc_extra_elems(struct bpf_htab *htab)
345 {
346         struct htab_elem *__percpu *pptr, *l_new;
347         struct pcpu_freelist_node *l;
348         int cpu;
349
350         pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8,
351                                     GFP_USER | __GFP_NOWARN);
352         if (!pptr)
353                 return -ENOMEM;
354
355         for_each_possible_cpu(cpu) {
356                 l = pcpu_freelist_pop(&htab->freelist);
357                 /* pop will succeed, since prealloc_init()
358                  * preallocated extra num_possible_cpus elements
359                  */
360                 l_new = container_of(l, struct htab_elem, fnode);
361                 *per_cpu_ptr(pptr, cpu) = l_new;
362         }
363         htab->extra_elems = pptr;
364         return 0;
365 }
366
367 /* Called from syscall */
368 static int htab_map_alloc_check(union bpf_attr *attr)
369 {
370         bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
371                        attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
372         bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
373                     attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
374         /* percpu_lru means each cpu has its own LRU list.
375          * it is different from BPF_MAP_TYPE_PERCPU_HASH where
376          * the map's value itself is percpu.  percpu_lru has
377          * nothing to do with the map's value.
378          */
379         bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
380         bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
381         bool zero_seed = (attr->map_flags & BPF_F_ZERO_SEED);
382         int numa_node = bpf_map_attr_numa_node(attr);
383
384         BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
385                      offsetof(struct htab_elem, hash_node.pprev));
386         BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
387                      offsetof(struct htab_elem, hash_node.pprev));
388
389         if (lru && !bpf_capable())
390                 /* LRU implementation is much more complicated than other
391                  * maps.  Hence, limit it to CAP_BPF.
392                  */
393                 return -EPERM;
394
395         if (zero_seed && !capable(CAP_SYS_ADMIN))
396                 /* Guard against local DoS, and discourage production use. */
397                 return -EPERM;
398
399         if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK ||
400             !bpf_map_flags_access_ok(attr->map_flags))
401                 return -EINVAL;
402
403         if (!lru && percpu_lru)
404                 return -EINVAL;
405
406         if (lru && !prealloc)
407                 return -ENOTSUPP;
408
409         if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
410                 return -EINVAL;
411
412         /* check sanity of attributes.
413          * value_size == 0 may be allowed in the future to use map as a set
414          */
415         if (attr->max_entries == 0 || attr->key_size == 0 ||
416             attr->value_size == 0)
417                 return -EINVAL;
418
419         if ((u64)attr->key_size + attr->value_size >= KMALLOC_MAX_SIZE -
420            sizeof(struct htab_elem))
421                 /* if key_size + value_size is bigger, the user space won't be
422                  * able to access the elements via bpf syscall. This check
423                  * also makes sure that the elem_size doesn't overflow and it's
424                  * kmalloc-able later in htab_map_update_elem()
425                  */
426                 return -E2BIG;
427
428         return 0;
429 }
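
/*
 * Hypothetical attr that passes this check (a sketch, field values chosen
 * only for illustration): a per-CPU-LRU map must be an LRU type, must keep
 * preallocation and needs bpf_capable():
 *
 *      union bpf_attr attr = {
 *              .map_type    = BPF_MAP_TYPE_LRU_HASH,
 *              .key_size    = 4,
 *              .value_size  = 8,
 *              .max_entries = 1024,
 *              .map_flags   = BPF_F_NO_COMMON_LRU,
 *      };
 *
 * Adding BPF_F_NO_PREALLOC to an LRU map is rejected with -ENOTSUPP above.
 */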
430
431 static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
432 {
433         bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
434                        attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
435         bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
436                     attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
437         /* percpu_lru means each cpu has its own LRU list.
438          * it is different from BPF_MAP_TYPE_PERCPU_HASH where
439          * the map's value itself is percpu.  percpu_lru has
440          * nothing to do with the map's value.
441          */
442         bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
443         bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
444         struct bpf_htab *htab;
445         int err, i;
446
447         htab = kzalloc(sizeof(*htab), GFP_USER | __GFP_ACCOUNT);
448         if (!htab)
449                 return ERR_PTR(-ENOMEM);
450
451         lockdep_register_key(&htab->lockdep_key);
452
453         bpf_map_init_from_attr(&htab->map, attr);
454
455         if (percpu_lru) {
456                 /* ensure each CPU's lru list has >=1 elements.
457                  * since we are at it, make each lru list have the same
458                  * number of elements.
459                  */
460                 htab->map.max_entries = roundup(attr->max_entries,
461                                                 num_possible_cpus());
462                 if (htab->map.max_entries < attr->max_entries)
463                         htab->map.max_entries = rounddown(attr->max_entries,
464                                                           num_possible_cpus());
465         }
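
        /*
         * Worked example of the rounding above, assuming 16 possible CPUs:
         * attr->max_entries = 1000 becomes 1008 (63 elements per per-CPU
         * LRU list); the rounddown() fallback is only taken if roundup()
         * overflowed u32 and wrapped below attr->max_entries.
         */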
466
467         /* hash table size must be power of 2 */
468         htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
469
470         htab->elem_size = sizeof(struct htab_elem) +
471                           round_up(htab->map.key_size, 8);
472         if (percpu)
473                 htab->elem_size += sizeof(void *);
474         else
475                 htab->elem_size += round_up(htab->map.value_size, 8);
476
477         err = -E2BIG;
478         /* prevent zero size kmalloc and check for u32 overflow */
479         if (htab->n_buckets == 0 ||
480             htab->n_buckets > U32_MAX / sizeof(struct bucket))
481                 goto free_htab;
482
483         err = -ENOMEM;
484         htab->buckets = bpf_map_area_alloc(htab->n_buckets *
485                                            sizeof(struct bucket),
486                                            htab->map.numa_node);
487         if (!htab->buckets)
488                 goto free_htab;
489
490         for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) {
491                 htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map,
492                                                            sizeof(int),
493                                                            sizeof(int),
494                                                            GFP_USER);
495                 if (!htab->map_locked[i])
496                         goto free_map_locked;
497         }
498
499         if (htab->map.map_flags & BPF_F_ZERO_SEED)
500                 htab->hashrnd = 0;
501         else
502                 htab->hashrnd = get_random_int();
503
504         htab_init_buckets(htab);
505
506         if (prealloc) {
507                 err = prealloc_init(htab);
508                 if (err)
509                         goto free_map_locked;
510
511                 if (!percpu && !lru) {
512                         /* lru itself can remove the least used element, so
513                          * there is no need for an extra elem during map_update.
514                          */
515                         err = alloc_extra_elems(htab);
516                         if (err)
517                                 goto free_prealloc;
518                 }
519         }
520
521         return &htab->map;
522
523 free_prealloc:
524         prealloc_destroy(htab);
525 free_map_locked:
526         for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
527                 free_percpu(htab->map_locked[i]);
528         bpf_map_area_free(htab->buckets);
529 free_htab:
530         lockdep_unregister_key(&htab->lockdep_key);
531         kfree(htab);
532         return ERR_PTR(err);
533 }
534
535 static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd)
536 {
537         return jhash(key, key_len, hashrnd);
538 }
539
540 static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
541 {
542         return &htab->buckets[hash & (htab->n_buckets - 1)];
543 }
544
545 static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
546 {
547         return &__select_bucket(htab, hash)->head;
548 }
549
550 /* this lookup function can only be called with bucket lock taken */
551 static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
552                                          void *key, u32 key_size)
553 {
554         struct hlist_nulls_node *n;
555         struct htab_elem *l;
556
557         hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
558                 if (l->hash == hash && !memcmp(&l->key, key, key_size))
559                         return l;
560
561         return NULL;
562 }
563
564 /* can be called without bucket lock. it will repeat the loop in
565  * the unlikely event that elements moved from one bucket into another
566  * while the linked list is being walked
567  */
568 static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
569                                                u32 hash, void *key,
570                                                u32 key_size, u32 n_buckets)
571 {
572         struct hlist_nulls_node *n;
573         struct htab_elem *l;
574
575 again:
576         hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
577                 if (l->hash == hash && !memcmp(&l->key, key, key_size))
578                         return l;
579
580         if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
581                 goto again;
582
583         return NULL;
584 }
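
/*
 * The retry relies on the nulls values set up in htab_init_buckets(): each
 * bucket's list is terminated by a nulls marker equal to its bucket index.
 * Sketch of the check above:
 *
 *      if (get_nulls_value(n) != (hash & (n_buckets - 1)))
 *              the element was moved to another bucket, walk again
 *
 * so a lockless lookup that raced with an element being freed and reused
 * under a different hash simply restarts.
 */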
585
586 /* Called from syscall or from eBPF program directly, so
587  * arguments have to match bpf_map_lookup_elem() exactly.
588  * The return value is adjusted by BPF instructions
589  * in htab_map_gen_lookup().
590  */
591 static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
592 {
593         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
594         struct hlist_nulls_head *head;
595         struct htab_elem *l;
596         u32 hash, key_size;
597
598         WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
599
600         key_size = map->key_size;
601
602         hash = htab_map_hash(key, key_size, htab->hashrnd);
603
604         head = select_bucket(htab, hash);
605
606         l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
607
608         return l;
609 }
610
611 static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
612 {
613         struct htab_elem *l = __htab_map_lookup_elem(map, key);
614
615         if (l)
616                 return l->key + round_up(map->key_size, 8);
617
618         return NULL;
619 }
620
621 /* inline bpf_map_lookup_elem() call.
622  * Instead of:
623  * bpf_prog
624  *   bpf_map_lookup_elem
625  *     map->ops->map_lookup_elem
626  *       htab_map_lookup_elem
627  *         __htab_map_lookup_elem
628  * do:
629  * bpf_prog
630  *   __htab_map_lookup_elem
631  */
632 static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
633 {
634         struct bpf_insn *insn = insn_buf;
635         const int ret = BPF_REG_0;
636
637         BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
638                      (void *(*)(struct bpf_map *map, void *key))NULL));
639         *insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
640         *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
641         *insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
642                                 offsetof(struct htab_elem, key) +
643                                 round_up(map->key_size, 8));
644         return insn - insn_buf;
645 }
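
/*
 * Roughly the sequence emitted above, in pseudo BPF assembly (sketch):
 *
 *      r0 = __htab_map_lookup_elem(map, key)
 *      if r0 == 0 goto out
 *      r0 += offsetof(struct htab_elem, key) + round_up(map->key_size, 8)
 * out:
 *
 * so a hit returns a pointer to the value and a miss returns NULL, matching
 * what bpf_map_lookup_elem() callers expect.
 */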
646
647 static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
648                                                         void *key, const bool mark)
649 {
650         struct htab_elem *l = __htab_map_lookup_elem(map, key);
651
652         if (l) {
653                 if (mark)
654                         bpf_lru_node_set_ref(&l->lru_node);
655                 return l->key + round_up(map->key_size, 8);
656         }
657
658         return NULL;
659 }
660
661 static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
662 {
663         return __htab_lru_map_lookup_elem(map, key, true);
664 }
665
666 static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
667 {
668         return __htab_lru_map_lookup_elem(map, key, false);
669 }
670
671 static int htab_lru_map_gen_lookup(struct bpf_map *map,
672                                    struct bpf_insn *insn_buf)
673 {
674         struct bpf_insn *insn = insn_buf;
675         const int ret = BPF_REG_0;
676         const int ref_reg = BPF_REG_1;
677
678         BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
679                      (void *(*)(struct bpf_map *map, void *key))NULL));
680         *insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
681         *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4);
682         *insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret,
683                               offsetof(struct htab_elem, lru_node) +
684                               offsetof(struct bpf_lru_node, ref));
685         *insn++ = BPF_JMP_IMM(BPF_JNE, ref_reg, 0, 1);
686         *insn++ = BPF_ST_MEM(BPF_B, ret,
687                              offsetof(struct htab_elem, lru_node) +
688                              offsetof(struct bpf_lru_node, ref),
689                              1);
690         *insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
691                                 offsetof(struct htab_elem, key) +
692                                 round_up(map->key_size, 8));
693         return insn - insn_buf;
694 }
695
696 /* It is called from the bpf_lru_list when the LRU needs to delete
697  * older elements from the htab.
698  */
699 static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
700 {
701         struct bpf_htab *htab = (struct bpf_htab *)arg;
702         struct htab_elem *l = NULL, *tgt_l;
703         struct hlist_nulls_head *head;
704         struct hlist_nulls_node *n;
705         unsigned long flags;
706         struct bucket *b;
707         int ret;
708
709         tgt_l = container_of(node, struct htab_elem, lru_node);
710         b = __select_bucket(htab, tgt_l->hash);
711         head = &b->head;
712
713         ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags);
714         if (ret)
715                 return false;
716
717         hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
718                 if (l == tgt_l) {
719                         hlist_nulls_del_rcu(&l->hash_node);
720                         break;
721                 }
722
723         htab_unlock_bucket(htab, b, tgt_l->hash, flags);
724
725         return l == tgt_l;
726 }
727
728 /* Called from syscall */
729 static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
730 {
731         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
732         struct hlist_nulls_head *head;
733         struct htab_elem *l, *next_l;
734         u32 hash, key_size;
735         int i = 0;
736
737         WARN_ON_ONCE(!rcu_read_lock_held());
738
739         key_size = map->key_size;
740
741         if (!key)
742                 goto find_first_elem;
743
744         hash = htab_map_hash(key, key_size, htab->hashrnd);
745
746         head = select_bucket(htab, hash);
747
748         /* lookup the key */
749         l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
750
751         if (!l)
752                 goto find_first_elem;
753
754         /* key was found, get next key in the same bucket */
755         next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
756                                   struct htab_elem, hash_node);
757
758         if (next_l) {
759                 /* if next elem in this hash list is non-zero, just return it */
760                 memcpy(next_key, next_l->key, key_size);
761                 return 0;
762         }
763
764         /* no more elements in this hash list, go to the next bucket */
765         i = hash & (htab->n_buckets - 1);
766         i++;
767
768 find_first_elem:
769         /* iterate over buckets */
770         for (; i < htab->n_buckets; i++) {
771                 head = select_bucket(htab, i);
772
773                 /* pick first element in the bucket */
774                 next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
775                                           struct htab_elem, hash_node);
776                 if (next_l) {
777                         /* if it's not empty, just return it */
778                         memcpy(next_key, next_l->key, key_size);
779                         return 0;
780                 }
781         }
782
783         /* iterated over all buckets and all elements */
784         return -ENOENT;
785 }
786
787 static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
788 {
789         if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
790                 free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
791         kfree(l);
792 }
793
794 static void htab_elem_free_rcu(struct rcu_head *head)
795 {
796         struct htab_elem *l = container_of(head, struct htab_elem, rcu);
797         struct bpf_htab *htab = l->htab;
798
799         htab_elem_free(htab, l);
800 }
801
802 static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
803 {
804         struct bpf_map *map = &htab->map;
805         void *ptr;
806
807         if (map->ops->map_fd_put_ptr) {
808                 ptr = fd_htab_map_get_ptr(map, l);
809                 map->ops->map_fd_put_ptr(ptr);
810         }
811 }
812
813 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
814 {
815         htab_put_fd_value(htab, l);
816
817         if (htab_is_prealloc(htab)) {
818                 __pcpu_freelist_push(&htab->freelist, &l->fnode);
819         } else {
820                 atomic_dec(&htab->count);
821                 l->htab = htab;
822                 call_rcu(&l->rcu, htab_elem_free_rcu);
823         }
824 }
825
826 static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
827                             void *value, bool onallcpus)
828 {
829         if (!onallcpus) {
830                 /* copy true value_size bytes */
831                 memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
832         } else {
833                 u32 size = round_up(htab->map.value_size, 8);
834                 int off = 0, cpu;
835
836                 for_each_possible_cpu(cpu) {
837                         bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
838                                         value + off, size);
839                         off += size;
840                 }
841         }
842 }
843
844 static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr,
845                             void *value, bool onallcpus)
846 {
847         /* When using prealloc and not setting the initial value on all cpus,
848          * zero-fill element values for other cpus (just as what happens when
849          * not using prealloc). Otherwise, bpf program has no way to ensure
850          * known initial values for cpus other than current one
851          * (onallcpus=false always when coming from bpf prog).
852          */
853         if (htab_is_prealloc(htab) && !onallcpus) {
854                 u32 size = round_up(htab->map.value_size, 8);
855                 int current_cpu = raw_smp_processor_id();
856                 int cpu;
857
858                 for_each_possible_cpu(cpu) {
859                         if (cpu == current_cpu)
860                                 bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value,
861                                                 size);
862                         else
863                                 memset(per_cpu_ptr(pptr, cpu), 0, size);
864                 }
865         } else {
866                 pcpu_copy_value(htab, pptr, value, onallcpus);
867         }
868 }
869
870 static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
871 {
872         return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
873                BITS_PER_LONG == 64;
874 }
875
876 static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
877                                          void *value, u32 key_size, u32 hash,
878                                          bool percpu, bool onallcpus,
879                                          struct htab_elem *old_elem)
880 {
881         u32 size = htab->map.value_size;
882         bool prealloc = htab_is_prealloc(htab);
883         struct htab_elem *l_new, **pl_new;
884         void __percpu *pptr;
885
886         if (prealloc) {
887                 if (old_elem) {
888                         /* if we're updating the existing element,
889                          * use per-cpu extra elems to avoid freelist_pop/push
890                          */
891                         pl_new = this_cpu_ptr(htab->extra_elems);
892                         l_new = *pl_new;
893                         htab_put_fd_value(htab, old_elem);
894                         *pl_new = old_elem;
895                 } else {
896                         struct pcpu_freelist_node *l;
897
898                         l = __pcpu_freelist_pop(&htab->freelist);
899                         if (!l)
900                                 return ERR_PTR(-E2BIG);
901                         l_new = container_of(l, struct htab_elem, fnode);
902                 }
903         } else {
904                 if (atomic_inc_return(&htab->count) > htab->map.max_entries)
905                         if (!old_elem) {
906                                 /* when map is full and update() is replacing
907                                  * old element, it's ok to allocate, since
908                                  * old element will be freed immediately.
909                                  * Otherwise return an error
910                                  */
911                                 l_new = ERR_PTR(-E2BIG);
912                                 goto dec_count;
913                         }
914                 l_new = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
915                                              GFP_ATOMIC | __GFP_NOWARN,
916                                              htab->map.numa_node);
917                 if (!l_new) {
918                         l_new = ERR_PTR(-ENOMEM);
919                         goto dec_count;
920                 }
921                 check_and_init_map_lock(&htab->map,
922                                         l_new->key + round_up(key_size, 8));
923         }
924
925         memcpy(l_new->key, key, key_size);
926         if (percpu) {
927                 size = round_up(size, 8);
928                 if (prealloc) {
929                         pptr = htab_elem_get_ptr(l_new, key_size);
930                 } else {
931                         /* alloc_percpu zero-fills */
932                         pptr = bpf_map_alloc_percpu(&htab->map, size, 8,
933                                                     GFP_ATOMIC | __GFP_NOWARN);
934                         if (!pptr) {
935                                 kfree(l_new);
936                                 l_new = ERR_PTR(-ENOMEM);
937                                 goto dec_count;
938                         }
939                 }
940
941                 pcpu_init_value(htab, pptr, value, onallcpus);
942
943                 if (!prealloc)
944                         htab_elem_set_ptr(l_new, key_size, pptr);
945         } else if (fd_htab_map_needs_adjust(htab)) {
946                 size = round_up(size, 8);
947                 memcpy(l_new->key + round_up(key_size, 8), value, size);
948         } else {
949                 copy_map_value(&htab->map,
950                                l_new->key + round_up(key_size, 8),
951                                value);
952         }
953
954         l_new->hash = hash;
955         return l_new;
956 dec_count:
957         atomic_dec(&htab->count);
958         return l_new;
959 }
960
961 static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
962                        u64 map_flags)
963 {
964         if (l_old && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
965                 /* elem already exists */
966                 return -EEXIST;
967
968         if (!l_old && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
969                 /* elem doesn't exist, cannot update it */
970                 return -ENOENT;
971
972         return 0;
973 }
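
/*
 * Illustrative userspace view of these flags (using the common libbpf
 * wrapper; names outside this file):
 *
 *      bpf_map_update_elem(fd, &key, &val, BPF_NOEXIST);  EEXIST if key exists
 *      bpf_map_update_elem(fd, &key, &val, BPF_EXIST);    ENOENT if it does not
 *      bpf_map_update_elem(fd, &key, &val, BPF_ANY);      insert or overwrite
 */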
974
975 /* Called from syscall or from eBPF program */
976 static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
977                                 u64 map_flags)
978 {
979         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
980         struct htab_elem *l_new = NULL, *l_old;
981         struct hlist_nulls_head *head;
982         unsigned long flags;
983         struct bucket *b;
984         u32 key_size, hash;
985         int ret;
986
987         if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
988                 /* unknown flags */
989                 return -EINVAL;
990
991         WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
992
993         key_size = map->key_size;
994
995         hash = htab_map_hash(key, key_size, htab->hashrnd);
996
997         b = __select_bucket(htab, hash);
998         head = &b->head;
999
1000         if (unlikely(map_flags & BPF_F_LOCK)) {
1001                 if (unlikely(!map_value_has_spin_lock(map)))
1002                         return -EINVAL;
1003                 /* find an element without taking the bucket lock */
1004                 l_old = lookup_nulls_elem_raw(head, hash, key, key_size,
1005                                               htab->n_buckets);
1006                 ret = check_flags(htab, l_old, map_flags);
1007                 if (ret)
1008                         return ret;
1009                 if (l_old) {
1010                         /* grab the element lock and update value in place */
1011                         copy_map_value_locked(map,
1012                                               l_old->key + round_up(key_size, 8),
1013                                               value, false);
1014                         return 0;
1015                 }
1016                 /* fall through, grab the bucket lock and lookup again.
1017                  * 99.9% chance that the element won't be found,
1018                  * but second lookup under lock has to be done.
1019                  */
1020         }
1021
1022         ret = htab_lock_bucket(htab, b, hash, &flags);
1023         if (ret)
1024                 return ret;
1025
1026         l_old = lookup_elem_raw(head, hash, key, key_size);
1027
1028         ret = check_flags(htab, l_old, map_flags);
1029         if (ret)
1030                 goto err;
1031
1032         if (unlikely(l_old && (map_flags & BPF_F_LOCK))) {
1033                 /* first lookup without the bucket lock didn't find the element,
1034                  * but second lookup with the bucket lock found it.
1035                  * This case is highly unlikely, but has to be dealt with:
1036                  * grab the element lock in addition to the bucket lock
1037                  * and update element in place
1038                  */
1039                 copy_map_value_locked(map,
1040                                       l_old->key + round_up(key_size, 8),
1041                                       value, false);
1042                 ret = 0;
1043                 goto err;
1044         }
1045
1046         l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
1047                                 l_old);
1048         if (IS_ERR(l_new)) {
1049                 /* all pre-allocated elements are in use or memory exhausted */
1050                 ret = PTR_ERR(l_new);
1051                 goto err;
1052         }
1053
1054         /* add new element to the head of the list, so that
1055          * concurrent search will find it before old elem
1056          */
1057         hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1058         if (l_old) {
1059                 hlist_nulls_del_rcu(&l_old->hash_node);
1060                 if (!htab_is_prealloc(htab))
1061                         free_htab_elem(htab, l_old);
1062         }
1063         ret = 0;
1064 err:
1065         htab_unlock_bucket(htab, b, hash, flags);
1066         return ret;
1067 }
1068
1069 static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
1070                                     u64 map_flags)
1071 {
1072         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1073         struct htab_elem *l_new, *l_old = NULL;
1074         struct hlist_nulls_head *head;
1075         unsigned long flags;
1076         struct bucket *b;
1077         u32 key_size, hash;
1078         int ret;
1079
1080         if (unlikely(map_flags > BPF_EXIST))
1081                 /* unknown flags */
1082                 return -EINVAL;
1083
1084         WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
1085
1086         key_size = map->key_size;
1087
1088         hash = htab_map_hash(key, key_size, htab->hashrnd);
1089
1090         b = __select_bucket(htab, hash);
1091         head = &b->head;
1092
1093         /* For LRU, we need to alloc before taking bucket's
1094          * spinlock because getting free nodes from LRU may need
1095          * to remove older elements from htab and this removal
1096          * operation will need a bucket lock.
1097          */
1098         l_new = prealloc_lru_pop(htab, key, hash);
1099         if (!l_new)
1100                 return -ENOMEM;
1101         memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size);
1102
1103         ret = htab_lock_bucket(htab, b, hash, &flags);
1104         if (ret)
1105                 return ret;
1106
1107         l_old = lookup_elem_raw(head, hash, key, key_size);
1108
1109         ret = check_flags(htab, l_old, map_flags);
1110         if (ret)
1111                 goto err;
1112
1113         /* add new element to the head of the list, so that
1114          * concurrent search will find it before old elem
1115          */
1116         hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1117         if (l_old) {
1118                 bpf_lru_node_set_ref(&l_new->lru_node);
1119                 hlist_nulls_del_rcu(&l_old->hash_node);
1120         }
1121         ret = 0;
1122
1123 err:
1124         htab_unlock_bucket(htab, b, hash, flags);
1125
1126         if (ret)
1127                 bpf_lru_push_free(&htab->lru, &l_new->lru_node);
1128         else if (l_old)
1129                 bpf_lru_push_free(&htab->lru, &l_old->lru_node);
1130
1131         return ret;
1132 }
1133
1134 static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
1135                                          void *value, u64 map_flags,
1136                                          bool onallcpus)
1137 {
1138         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1139         struct htab_elem *l_new = NULL, *l_old;
1140         struct hlist_nulls_head *head;
1141         unsigned long flags;
1142         struct bucket *b;
1143         u32 key_size, hash;
1144         int ret;
1145
1146         if (unlikely(map_flags > BPF_EXIST))
1147                 /* unknown flags */
1148                 return -EINVAL;
1149
1150         WARN_ON_ONCE(!rcu_read_lock_held());
1151
1152         key_size = map->key_size;
1153
1154         hash = htab_map_hash(key, key_size, htab->hashrnd);
1155
1156         b = __select_bucket(htab, hash);
1157         head = &b->head;
1158
1159         ret = htab_lock_bucket(htab, b, hash, &flags);
1160         if (ret)
1161                 return ret;
1162
1163         l_old = lookup_elem_raw(head, hash, key, key_size);
1164
1165         ret = check_flags(htab, l_old, map_flags);
1166         if (ret)
1167                 goto err;
1168
1169         if (l_old) {
1170                 /* per-cpu hash map can update value in-place */
1171                 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
1172                                 value, onallcpus);
1173         } else {
1174                 l_new = alloc_htab_elem(htab, key, value, key_size,
1175                                         hash, true, onallcpus, NULL);
1176                 if (IS_ERR(l_new)) {
1177                         ret = PTR_ERR(l_new);
1178                         goto err;
1179                 }
1180                 hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1181         }
1182         ret = 0;
1183 err:
1184         htab_unlock_bucket(htab, b, hash, flags);
1185         return ret;
1186 }
1187
1188 static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
1189                                              void *value, u64 map_flags,
1190                                              bool onallcpus)
1191 {
1192         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1193         struct htab_elem *l_new = NULL, *l_old;
1194         struct hlist_nulls_head *head;
1195         unsigned long flags;
1196         struct bucket *b;
1197         u32 key_size, hash;
1198         int ret;
1199
1200         if (unlikely(map_flags > BPF_EXIST))
1201                 /* unknown flags */
1202                 return -EINVAL;
1203
1204         WARN_ON_ONCE(!rcu_read_lock_held());
1205
1206         key_size = map->key_size;
1207
1208         hash = htab_map_hash(key, key_size, htab->hashrnd);
1209
1210         b = __select_bucket(htab, hash);
1211         head = &b->head;
1212
1213         /* For LRU, we need to alloc before taking bucket's
1214          * spinlock because LRU's elem alloc may need
1215          * to remove older elem from htab and this removal
1216          * operation will need a bucket lock.
1217          */
1218         if (map_flags != BPF_EXIST) {
1219                 l_new = prealloc_lru_pop(htab, key, hash);
1220                 if (!l_new)
1221                         return -ENOMEM;
1222         }
1223
1224         ret = htab_lock_bucket(htab, b, hash, &flags);
1225         if (ret)
1226                 return ret;
1227
1228         l_old = lookup_elem_raw(head, hash, key, key_size);
1229
1230         ret = check_flags(htab, l_old, map_flags);
1231         if (ret)
1232                 goto err;
1233
1234         if (l_old) {
1235                 bpf_lru_node_set_ref(&l_old->lru_node);
1236
1237                 /* per-cpu hash map can update value in-place */
1238                 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
1239                                 value, onallcpus);
1240         } else {
1241                 pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size),
1242                                 value, onallcpus);
1243                 hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1244                 l_new = NULL;
1245         }
1246         ret = 0;
1247 err:
1248         htab_unlock_bucket(htab, b, hash, flags);
1249         if (l_new)
1250                 bpf_lru_push_free(&htab->lru, &l_new->lru_node);
1251         return ret;
1252 }
1253
1254 static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
1255                                        void *value, u64 map_flags)
1256 {
1257         return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
1258 }
1259
1260 static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
1261                                            void *value, u64 map_flags)
1262 {
1263         return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
1264                                                  false);
1265 }
1266
1267 /* Called from syscall or from eBPF program */
1268 static int htab_map_delete_elem(struct bpf_map *map, void *key)
1269 {
1270         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1271         struct hlist_nulls_head *head;
1272         struct bucket *b;
1273         struct htab_elem *l;
1274         unsigned long flags;
1275         u32 hash, key_size;
1276         int ret;
1277
1278         WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
1279
1280         key_size = map->key_size;
1281
1282         hash = htab_map_hash(key, key_size, htab->hashrnd);
1283         b = __select_bucket(htab, hash);
1284         head = &b->head;
1285
1286         ret = htab_lock_bucket(htab, b, hash, &flags);
1287         if (ret)
1288                 return ret;
1289
1290         l = lookup_elem_raw(head, hash, key, key_size);
1291
1292         if (l) {
1293                 hlist_nulls_del_rcu(&l->hash_node);
1294                 free_htab_elem(htab, l);
1295         } else {
1296                 ret = -ENOENT;
1297         }
1298
1299         htab_unlock_bucket(htab, b, hash, flags);
1300         return ret;
1301 }
1302
1303 static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
1304 {
1305         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1306         struct hlist_nulls_head *head;
1307         struct bucket *b;
1308         struct htab_elem *l;
1309         unsigned long flags;
1310         u32 hash, key_size;
1311         int ret;
1312
1313         WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
1314
1315         key_size = map->key_size;
1316
1317         hash = htab_map_hash(key, key_size, htab->hashrnd);
1318         b = __select_bucket(htab, hash);
1319         head = &b->head;
1320
1321         ret = htab_lock_bucket(htab, b, hash, &flags);
1322         if (ret)
1323                 return ret;
1324
1325         l = lookup_elem_raw(head, hash, key, key_size);
1326
1327         if (l)
1328                 hlist_nulls_del_rcu(&l->hash_node);
1329         else
1330                 ret = -ENOENT;
1331
1332         htab_unlock_bucket(htab, b, hash, flags);
1333         if (l)
1334                 bpf_lru_push_free(&htab->lru, &l->lru_node);
1335         return ret;
1336 }
1337
1338 static void delete_all_elements(struct bpf_htab *htab)
1339 {
1340         int i;
1341
1342         for (i = 0; i < htab->n_buckets; i++) {
1343                 struct hlist_nulls_head *head = select_bucket(htab, i);
1344                 struct hlist_nulls_node *n;
1345                 struct htab_elem *l;
1346
1347                 hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
1348                         hlist_nulls_del_rcu(&l->hash_node);
1349                         htab_elem_free(htab, l);
1350                 }
1351         }
1352 }
1353
1354 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */
1355 static void htab_map_free(struct bpf_map *map)
1356 {
1357         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1358         int i;
1359
1360         /* bpf_free_used_maps() or close(map_fd) will trigger this map_free callback.
1361          * bpf_free_used_maps() is called after bpf prog is no longer executing.
1362          * There is no need to synchronize_rcu() here to protect map elements.
1363          */
1364
1365         /* some of free_htab_elem() callbacks for elements of this map may
1366          * not have executed. Wait for them.
1367          */
1368         rcu_barrier();
1369         if (!htab_is_prealloc(htab))
1370                 delete_all_elements(htab);
1371         else
1372                 prealloc_destroy(htab);
1373
1374         free_percpu(htab->extra_elems);
1375         bpf_map_area_free(htab->buckets);
1376         for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
1377                 free_percpu(htab->map_locked[i]);
1378         lockdep_unregister_key(&htab->lockdep_key);
1379         kfree(htab);
1380 }
1381
1382 static void htab_map_seq_show_elem(struct bpf_map *map, void *key,
1383                                    struct seq_file *m)
1384 {
1385         void *value;
1386
1387         rcu_read_lock();
1388
1389         value = htab_map_lookup_elem(map, key);
1390         if (!value) {
1391                 rcu_read_unlock();
1392                 return;
1393         }
1394
1395         btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
1396         seq_puts(m, ": ");
1397         btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
1398         seq_puts(m, "\n");
1399
1400         rcu_read_unlock();
1401 }
1402
1403 static int
1404 __htab_map_lookup_and_delete_batch(struct bpf_map *map,
1405                                    const union bpf_attr *attr,
1406                                    union bpf_attr __user *uattr,
1407                                    bool do_delete, bool is_lru_map,
1408                                    bool is_percpu)
1409 {
1410         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1411         u32 bucket_cnt, total, key_size, value_size, roundup_key_size;
1412         void *keys = NULL, *values = NULL, *value, *dst_key, *dst_val;
1413         void __user *uvalues = u64_to_user_ptr(attr->batch.values);
1414         void __user *ukeys = u64_to_user_ptr(attr->batch.keys);
1415         void *ubatch = u64_to_user_ptr(attr->batch.in_batch);
1416         u32 batch, max_count, size, bucket_size;
1417         struct htab_elem *node_to_free = NULL;
1418         u64 elem_map_flags, map_flags;
1419         struct hlist_nulls_head *head;
1420         struct hlist_nulls_node *n;
1421         unsigned long flags = 0;
1422         bool locked = false;
1423         struct htab_elem *l;
1424         struct bucket *b;
1425         int ret = 0;
1426
1427         elem_map_flags = attr->batch.elem_flags;
1428         if ((elem_map_flags & ~BPF_F_LOCK) ||
1429             ((elem_map_flags & BPF_F_LOCK) && !map_value_has_spin_lock(map)))
1430                 return -EINVAL;
1431
1432         map_flags = attr->batch.flags;
1433         if (map_flags)
1434                 return -EINVAL;
1435
1436         max_count = attr->batch.count;
1437         if (!max_count)
1438                 return 0;
1439
1440         if (put_user(0, &uattr->batch.count))
1441                 return -EFAULT;
1442
1443         batch = 0;
1444         if (ubatch && copy_from_user(&batch, ubatch, sizeof(batch)))
1445                 return -EFAULT;
1446
1447         if (batch >= htab->n_buckets)
1448                 return -ENOENT;
1449
1450         key_size = htab->map.key_size;
1451         roundup_key_size = round_up(htab->map.key_size, 8);
1452         value_size = htab->map.value_size;
1453         size = round_up(value_size, 8);
1454         if (is_percpu)
1455                 value_size = size * num_possible_cpus();
1456         total = 0;
1457         /* while experimenting with hash tables with sizes ranging from 10 to
1458          * 1000, it was observed that a bucket can have up to 5 entries.
1459          */
1460         bucket_size = 5;
1461
1462 alloc:
1463         /* We cannot do copy_from_user or copy_to_user inside
1464          * the rcu_read_lock. Allocate enough space here.
1465          */
1466         keys = kvmalloc(key_size * bucket_size, GFP_USER | __GFP_NOWARN);
1467         values = kvmalloc(value_size * bucket_size, GFP_USER | __GFP_NOWARN);
1468         if (!keys || !values) {
1469                 ret = -ENOMEM;
1470                 goto after_loop;
1471         }
1472
1473 again:
1474         bpf_disable_instrumentation();
1475         rcu_read_lock();
1476 again_nocopy:
1477         dst_key = keys;
1478         dst_val = values;
1479         b = &htab->buckets[batch];
1480         head = &b->head;
1481         /* do not grab the lock unless we need it (bucket_cnt > 0). */
1482         if (locked) {
1483                 ret = htab_lock_bucket(htab, b, batch, &flags);
1484                 if (ret)
1485                         goto next_batch;
1486         }
1487
1488         bucket_cnt = 0;
1489         hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
1490                 bucket_cnt++;
1491
1492         if (bucket_cnt && !locked) {
1493                 locked = true;
1494                 goto again_nocopy;
1495         }
1496
1497         if (bucket_cnt > (max_count - total)) {
1498                 if (total == 0)
1499                         ret = -ENOSPC;
1500                 /* Note that since bucket_cnt > 0 here, it is implicit
1501                  * that the lock was grabbed, so release it.
1502                  */
1503                 htab_unlock_bucket(htab, b, batch, flags);
1504                 rcu_read_unlock();
1505                 bpf_enable_instrumentation();
1506                 goto after_loop;
1507         }
1508
1509         if (bucket_cnt > bucket_size) {
1510                 bucket_size = bucket_cnt;
1511                 /* Note that since bucket_cnt > 0 here, it is implicit
1512                  * that the lock was grabbed, so release it.
1513                  */
1514                 htab_unlock_bucket(htab, b, batch, flags);
1515                 rcu_read_unlock();
1516                 bpf_enable_instrumentation();
1517                 kvfree(keys);
1518                 kvfree(values);
1519                 goto alloc;
1520         }
1521
1522         /* The next block is only safe to run if the bucket lock is held */
1523         if (!locked)
1524                 goto next_batch;
1525
1526         hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
1527                 memcpy(dst_key, l->key, key_size);
1528
1529                 if (is_percpu) {
1530                         int off = 0, cpu;
1531                         void __percpu *pptr;
1532
1533                         pptr = htab_elem_get_ptr(l, map->key_size);
1534                         for_each_possible_cpu(cpu) {
1535                                 bpf_long_memcpy(dst_val + off,
1536                                                 per_cpu_ptr(pptr, cpu), size);
1537                                 off += size;
1538                         }
1539                 } else {
1540                         value = l->key + roundup_key_size;
1541                         if (elem_map_flags & BPF_F_LOCK)
1542                                 copy_map_value_locked(map, dst_val, value,
1543                                                       true);
1544                         else
1545                                 copy_map_value(map, dst_val, value);
1546                         check_and_init_map_lock(map, dst_val);
1547                 }
1548                 if (do_delete) {
1549                         hlist_nulls_del_rcu(&l->hash_node);
1550
1551                         /* bpf_lru_push_free() will acquire lru_lock, which
1552                          * may cause deadlock. See comments in function
1553                          * prealloc_lru_pop(). Let us do bpf_lru_push_free()
1554                          * after releasing the bucket lock.
1555                          */
1556                         if (is_lru_map) {
1557                                 l->batch_flink = node_to_free;
1558                                 node_to_free = l;
1559                         } else {
1560                                 free_htab_elem(htab, l);
1561                         }
1562                 }
1563                 dst_key += key_size;
1564                 dst_val += value_size;
1565         }
1566
1567         htab_unlock_bucket(htab, b, batch, flags);
1568         locked = false;
1569
1570         while (node_to_free) {
1571                 l = node_to_free;
1572                 node_to_free = node_to_free->batch_flink;
1573                 bpf_lru_push_free(&htab->lru, &l->lru_node);
1574         }
1575
1576 next_batch:
1577         /* If we are not copying data, we can go to the next bucket and avoid
1578          * leaving the RCU read-side critical section.
1579          */
1580         if (!bucket_cnt && (batch + 1 < htab->n_buckets)) {
1581                 batch++;
1582                 goto again_nocopy;
1583         }
1584
1585         rcu_read_unlock();
1586         bpf_enable_instrumentation();
1587         if (bucket_cnt && (copy_to_user(ukeys + total * key_size, keys,
1588             key_size * bucket_cnt) ||
1589             copy_to_user(uvalues + total * value_size, values,
1590             value_size * bucket_cnt))) {
1591                 ret = -EFAULT;
1592                 goto after_loop;
1593         }
1594
1595         total += bucket_cnt;
1596         batch++;
1597         if (batch >= htab->n_buckets) {
1598                 ret = -ENOENT;
1599                 goto after_loop;
1600         }
1601         goto again;
1602
1603 after_loop:
1604         if (ret == -EFAULT)
1605                 goto out;
1606
1607         /* copy # of entries and next batch */
1608         ubatch = u64_to_user_ptr(attr->batch.out_batch);
1609         if (copy_to_user(ubatch, &batch, sizeof(batch)) ||
1610             put_user(total, &uattr->batch.count))
1611                 ret = -EFAULT;
1612
1613 out:
1614         kvfree(keys);
1615         kvfree(values);
1616         return ret;
1617 }
1618
1619 static int
1620 htab_percpu_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1621                              union bpf_attr __user *uattr)
1622 {
1623         return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1624                                                   false, true);
1625 }
1626
1627 static int
1628 htab_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
1629                                         const union bpf_attr *attr,
1630                                         union bpf_attr __user *uattr)
1631 {
1632         return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1633                                                   false, true);
1634 }
1635
1636 static int
1637 htab_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1638                       union bpf_attr __user *uattr)
1639 {
1640         return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1641                                                   false, false);
1642 }
1643
1644 static int
1645 htab_map_lookup_and_delete_batch(struct bpf_map *map,
1646                                  const union bpf_attr *attr,
1647                                  union bpf_attr __user *uattr)
1648 {
1649         return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1650                                                   false, false);
1651 }
1652
1653 static int
1654 htab_lru_percpu_map_lookup_batch(struct bpf_map *map,
1655                                  const union bpf_attr *attr,
1656                                  union bpf_attr __user *uattr)
1657 {
1658         return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1659                                                   true, true);
1660 }
1661
1662 static int
1663 htab_lru_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
1664                                             const union bpf_attr *attr,
1665                                             union bpf_attr __user *uattr)
1666 {
1667         return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1668                                                   true, true);
1669 }
1670
1671 static int
1672 htab_lru_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1673                           union bpf_attr __user *uattr)
1674 {
1675         return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1676                                                   true, false);
1677 }
1678
1679 static int
1680 htab_lru_map_lookup_and_delete_batch(struct bpf_map *map,
1681                                      const union bpf_attr *attr,
1682                                      union bpf_attr __user *uattr)
1683 {
1684         return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1685                                                   true, false);
1686 }
1687
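/*
 * A minimal userspace sketch of driving the batch interface above.  It
 * assumes libbpf's bpf_map_lookup_batch() wrapper and an already-created map
 * fd 'map_fd'; ENTRIES, KEY_SZ and VAL_SZ are illustrative placeholders.
 *
 *	__u32 out_batch, count;
 *	void *in_batch = NULL;
 *	char keys[ENTRIES * KEY_SZ], vals[ENTRIES * VAL_SZ];
 *	int err;
 *
 *	do {
 *		count = ENTRIES;
 *		err = bpf_map_lookup_batch(map_fd, in_batch, &out_batch,
 *					   keys, vals, &count, NULL);
 *		if (err && errno != ENOENT)
 *			break;			// real error (e.g. ENOSPC)
 *		// consume 'count' key/value pairs here
 *		in_batch = &out_batch;		// resume from returned cursor
 *	} while (!err);				// ENOENT ends the walk
 */
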
1688 struct bpf_iter_seq_hash_map_info {
1689         struct bpf_map *map;
1690         struct bpf_htab *htab;
1691         void *percpu_value_buf; // non-NULL means percpu hash
1692         u32 bucket_id;
1693         u32 skip_elems;
1694 };
1695
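/* Find the next element to show, resuming at (bucket_id, skip_elems).  The
 * rcu_read_lock() taken when entering a bucket stays held while an element
 * from it is handed to the seq_file core; it is dropped here when advancing
 * past the bucket, or in bpf_hash_map_seq_stop() at the end of the dump.
 */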
1696 static struct htab_elem *
1697 bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info,
1698                            struct htab_elem *prev_elem)
1699 {
1700         const struct bpf_htab *htab = info->htab;
1701         u32 skip_elems = info->skip_elems;
1702         u32 bucket_id = info->bucket_id;
1703         struct hlist_nulls_head *head;
1704         struct hlist_nulls_node *n;
1705         struct htab_elem *elem;
1706         struct bucket *b;
1707         u32 i, count;
1708
1709         if (bucket_id >= htab->n_buckets)
1710                 return NULL;
1711
1712         /* try to find next elem in the same bucket */
1713         if (prev_elem) {
1714                 /* If there was no update/deletion on this bucket, prev_elem
1715                  * is still valid and we won't skip any elements.
1716                  */
1717                 n = rcu_dereference_raw(hlist_nulls_next_rcu(&prev_elem->hash_node));
1718                 elem = hlist_nulls_entry_safe(n, struct htab_elem, hash_node);
1719                 if (elem)
1720                         return elem;
1721
1722                 /* not found, unlock and go to the next bucket */
1723                 b = &htab->buckets[bucket_id++];
1724                 rcu_read_unlock();
1725                 skip_elems = 0;
1726         }
1727
1728         for (i = bucket_id; i < htab->n_buckets; i++) {
1729                 b = &htab->buckets[i];
1730                 rcu_read_lock();
1731
1732                 count = 0;
1733                 head = &b->head;
1734                 hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
1735                         if (count >= skip_elems) {
1736                                 info->bucket_id = i;
1737                                 info->skip_elems = count;
1738                                 return elem;
1739                         }
1740                         count++;
1741                 }
1742
1743                 rcu_read_unlock();
1744                 skip_elems = 0;
1745         }
1746
1747         info->bucket_id = i;
1748         info->skip_elems = 0;
1749         return NULL;
1750 }
1751
1752 static void *bpf_hash_map_seq_start(struct seq_file *seq, loff_t *pos)
1753 {
1754         struct bpf_iter_seq_hash_map_info *info = seq->private;
1755         struct htab_elem *elem;
1756
1757         elem = bpf_hash_map_seq_find_next(info, NULL);
1758         if (!elem)
1759                 return NULL;
1760
1761         if (*pos == 0)
1762                 ++*pos;
1763         return elem;
1764 }
1765
1766 static void *bpf_hash_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1767 {
1768         struct bpf_iter_seq_hash_map_info *info = seq->private;
1769
1770         ++*pos;
1771         ++info->skip_elems;
1772         return bpf_hash_map_seq_find_next(info, v);
1773 }
1774
1775 static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem)
1776 {
1777         struct bpf_iter_seq_hash_map_info *info = seq->private;
1778         u32 roundup_key_size, roundup_value_size;
1779         struct bpf_iter__bpf_map_elem ctx = {};
1780         struct bpf_map *map = info->map;
1781         struct bpf_iter_meta meta;
1782         int ret = 0, off = 0, cpu;
1783         struct bpf_prog *prog;
1784         void __percpu *pptr;
1785
1786         meta.seq = seq;
1787         prog = bpf_iter_get_info(&meta, elem == NULL);
1788         if (prog) {
1789                 ctx.meta = &meta;
1790                 ctx.map = info->map;
1791                 if (elem) {
1792                         roundup_key_size = round_up(map->key_size, 8);
1793                         ctx.key = elem->key;
1794                         if (!info->percpu_value_buf) {
1795                                 ctx.value = elem->key + roundup_key_size;
1796                         } else {
1797                                 roundup_value_size = round_up(map->value_size, 8);
1798                                 pptr = htab_elem_get_ptr(elem, map->key_size);
1799                                 for_each_possible_cpu(cpu) {
1800                                         bpf_long_memcpy(info->percpu_value_buf + off,
1801                                                         per_cpu_ptr(pptr, cpu),
1802                                                         roundup_value_size);
1803                                         off += roundup_value_size;
1804                                 }
1805                                 ctx.value = info->percpu_value_buf;
1806                         }
1807                 }
1808                 ret = bpf_iter_run_prog(prog, &ctx);
1809         }
1810
1811         return ret;
1812 }
1813
1814 static int bpf_hash_map_seq_show(struct seq_file *seq, void *v)
1815 {
1816         return __bpf_hash_map_seq_show(seq, v);
1817 }
1818
1819 static void bpf_hash_map_seq_stop(struct seq_file *seq, void *v)
1820 {
1821         if (!v)
1822                 (void)__bpf_hash_map_seq_show(seq, NULL);
1823         else
1824                 rcu_read_unlock();
1825 }
1826
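/* For per-CPU hash maps the iterator needs a scratch buffer big enough for
 * round_up(value_size, 8) * num_possible_cpus() bytes; allocate it once per
 * seq_file here.
 */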
1827 static int bpf_iter_init_hash_map(void *priv_data,
1828                                   struct bpf_iter_aux_info *aux)
1829 {
1830         struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
1831         struct bpf_map *map = aux->map;
1832         void *value_buf;
1833         u32 buf_size;
1834
1835         if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
1836             map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
1837                 buf_size = round_up(map->value_size, 8) * num_possible_cpus();
1838                 value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
1839                 if (!value_buf)
1840                         return -ENOMEM;
1841
1842                 seq_info->percpu_value_buf = value_buf;
1843         }
1844
1845         seq_info->map = map;
1846         seq_info->htab = container_of(map, struct bpf_htab, map);
1847         return 0;
1848 }
1849
1850 static void bpf_iter_fini_hash_map(void *priv_data)
1851 {
1852         struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
1853
1854         kfree(seq_info->percpu_value_buf);
1855 }
1856
1857 static const struct seq_operations bpf_hash_map_seq_ops = {
1858         .start  = bpf_hash_map_seq_start,
1859         .next   = bpf_hash_map_seq_next,
1860         .stop   = bpf_hash_map_seq_stop,
1861         .show   = bpf_hash_map_seq_show,
1862 };
1863
1864 static const struct bpf_iter_seq_info iter_seq_info = {
1865         .seq_ops                = &bpf_hash_map_seq_ops,
1866         .init_seq_private       = bpf_iter_init_hash_map,
1867         .fini_seq_private       = bpf_iter_fini_hash_map,
1868         .seq_priv_size          = sizeof(struct bpf_iter_seq_hash_map_info),
1869 };
1870
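/*
 * Sketch of a BPF iterator program consuming the bpf_iter__bpf_map_elem
 * context built above (illustrative only; the section name and
 * BPF_SEQ_PRINTF() follow libbpf conventions, and the key/value types are
 * made up):
 *
 *	SEC("iter/bpf_map_elem")
 *	int dump_htab(struct bpf_iter__bpf_map_elem *ctx)
 *	{
 *		__u32 *key = ctx->key;
 *		__u64 *val = ctx->value;
 *
 *		// key/value are NULL on the final call, see bpf_hash_map_seq_stop()
 *		if (!key || !val)
 *			return 0;
 *		BPF_SEQ_PRINTF(ctx->meta->seq, "%u: %llu\n", *key, *val);
 *		return 0;
 *	}
 */
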
1871 static int htab_map_btf_id;
1872 const struct bpf_map_ops htab_map_ops = {
1873         .map_meta_equal = bpf_map_meta_equal,
1874         .map_alloc_check = htab_map_alloc_check,
1875         .map_alloc = htab_map_alloc,
1876         .map_free = htab_map_free,
1877         .map_get_next_key = htab_map_get_next_key,
1878         .map_lookup_elem = htab_map_lookup_elem,
1879         .map_update_elem = htab_map_update_elem,
1880         .map_delete_elem = htab_map_delete_elem,
1881         .map_gen_lookup = htab_map_gen_lookup,
1882         .map_seq_show_elem = htab_map_seq_show_elem,
1883         BATCH_OPS(htab),
1884         .map_btf_name = "bpf_htab",
1885         .map_btf_id = &htab_map_btf_id,
1886         .iter_seq_info = &iter_seq_info,
1887 };
1888
1889 static int htab_lru_map_btf_id;
1890 const struct bpf_map_ops htab_lru_map_ops = {
1891         .map_meta_equal = bpf_map_meta_equal,
1892         .map_alloc_check = htab_map_alloc_check,
1893         .map_alloc = htab_map_alloc,
1894         .map_free = htab_map_free,
1895         .map_get_next_key = htab_map_get_next_key,
1896         .map_lookup_elem = htab_lru_map_lookup_elem,
1897         .map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
1898         .map_update_elem = htab_lru_map_update_elem,
1899         .map_delete_elem = htab_lru_map_delete_elem,
1900         .map_gen_lookup = htab_lru_map_gen_lookup,
1901         .map_seq_show_elem = htab_map_seq_show_elem,
1902         BATCH_OPS(htab_lru),
1903         .map_btf_name = "bpf_htab",
1904         .map_btf_id = &htab_lru_map_btf_id,
1905         .iter_seq_info = &iter_seq_info,
1906 };
1907
1908 /* Called from eBPF program */
1909 static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
1910 {
1911         struct htab_elem *l = __htab_map_lookup_elem(map, key);
1912
1913         if (l)
1914                 return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
1915         else
1916                 return NULL;
1917 }
1918
1919 static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
1920 {
1921         struct htab_elem *l = __htab_map_lookup_elem(map, key);
1922
1923         if (l) {
1924                 bpf_lru_node_set_ref(&l->lru_node);
1925                 return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
1926         }
1927
1928         return NULL;
1929 }
1930
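/* Syscall-side lookup of a per-CPU hash: gather every possible CPU's copy of
 * the value into the flat, 8-byte-aligned buffer supplied by the caller.
 */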
1931 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
1932 {
1933         struct htab_elem *l;
1934         void __percpu *pptr;
1935         int ret = -ENOENT;
1936         int cpu, off = 0;
1937         u32 size;
1938
1939         /* per_cpu areas are zero-filled and bpf programs can only
1940          * access 'value_size' of them, so copying rounded areas
1941          * will not leak any kernel data
1942          */
1943         size = round_up(map->value_size, 8);
1944         rcu_read_lock();
1945         l = __htab_map_lookup_elem(map, key);
1946         if (!l)
1947                 goto out;
1948         /* We do not mark the LRU map element here in order not to mess up
1949          * the eviction heuristics when user space does a map walk.
1950          */
1951         pptr = htab_elem_get_ptr(l, map->key_size);
1952         for_each_possible_cpu(cpu) {
1953                 bpf_long_memcpy(value + off,
1954                                 per_cpu_ptr(pptr, cpu), size);
1955                 off += size;
1956         }
1957         ret = 0;
1958 out:
1959         rcu_read_unlock();
1960         return ret;
1961 }
1962
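/* Syscall-side update of a per-CPU hash: 'value' carries one copy of the
 * value per possible CPU, hence onallcpus == true in the calls below.
 */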
1963 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
1964                            u64 map_flags)
1965 {
1966         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1967         int ret;
1968
1969         rcu_read_lock();
1970         if (htab_is_lru(htab))
1971                 ret = __htab_lru_percpu_map_update_elem(map, key, value,
1972                                                         map_flags, true);
1973         else
1974                 ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
1975                                                     true);
1976         rcu_read_unlock();
1977
1978         return ret;
1979 }
1980
1981 static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
1982                                           struct seq_file *m)
1983 {
1984         struct htab_elem *l;
1985         void __percpu *pptr;
1986         int cpu;
1987
1988         rcu_read_lock();
1989
1990         l = __htab_map_lookup_elem(map, key);
1991         if (!l) {
1992                 rcu_read_unlock();
1993                 return;
1994         }
1995
1996         btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
1997         seq_puts(m, ": {\n");
1998         pptr = htab_elem_get_ptr(l, map->key_size);
1999         for_each_possible_cpu(cpu) {
2000                 seq_printf(m, "\tcpu%d: ", cpu);
2001                 btf_type_seq_show(map->btf, map->btf_value_type_id,
2002                                   per_cpu_ptr(pptr, cpu), m);
2003                 seq_puts(m, "\n");
2004         }
2005         seq_puts(m, "}\n");
2006
2007         rcu_read_unlock();
2008 }
2009
2010 static int htab_percpu_map_btf_id;
2011 const struct bpf_map_ops htab_percpu_map_ops = {
2012         .map_meta_equal = bpf_map_meta_equal,
2013         .map_alloc_check = htab_map_alloc_check,
2014         .map_alloc = htab_map_alloc,
2015         .map_free = htab_map_free,
2016         .map_get_next_key = htab_map_get_next_key,
2017         .map_lookup_elem = htab_percpu_map_lookup_elem,
2018         .map_update_elem = htab_percpu_map_update_elem,
2019         .map_delete_elem = htab_map_delete_elem,
2020         .map_seq_show_elem = htab_percpu_map_seq_show_elem,
2021         BATCH_OPS(htab_percpu),
2022         .map_btf_name = "bpf_htab",
2023         .map_btf_id = &htab_percpu_map_btf_id,
2024         .iter_seq_info = &iter_seq_info,
2025 };
2026
2027 static int htab_lru_percpu_map_btf_id;
2028 const struct bpf_map_ops htab_lru_percpu_map_ops = {
2029         .map_meta_equal = bpf_map_meta_equal,
2030         .map_alloc_check = htab_map_alloc_check,
2031         .map_alloc = htab_map_alloc,
2032         .map_free = htab_map_free,
2033         .map_get_next_key = htab_map_get_next_key,
2034         .map_lookup_elem = htab_lru_percpu_map_lookup_elem,
2035         .map_update_elem = htab_lru_percpu_map_update_elem,
2036         .map_delete_elem = htab_lru_map_delete_elem,
2037         .map_seq_show_elem = htab_percpu_map_seq_show_elem,
2038         BATCH_OPS(htab_lru_percpu),
2039         .map_btf_name = "bpf_htab",
2040         .map_btf_id = &htab_lru_percpu_map_btf_id,
2041         .iter_seq_info = &iter_seq_info,
2042 };
2043
2044 static int fd_htab_map_alloc_check(union bpf_attr *attr)
2045 {
2046         if (attr->value_size != sizeof(u32))
2047                 return -EINVAL;
2048         return htab_map_alloc_check(attr);
2049 }
2050
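/* Drop the reference each remaining element holds on its target (the pointer
 * obtained via map_fd_get_ptr() at update time) before freeing the table.
 */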
2051 static void fd_htab_map_free(struct bpf_map *map)
2052 {
2053         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2054         struct hlist_nulls_node *n;
2055         struct hlist_nulls_head *head;
2056         struct htab_elem *l;
2057         int i;
2058
2059         for (i = 0; i < htab->n_buckets; i++) {
2060                 head = select_bucket(htab, i);
2061
2062                 hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
2063                         void *ptr = fd_htab_map_get_ptr(map, l);
2064
2065                         map->ops->map_fd_put_ptr(ptr);
2066                 }
2067         }
2068
2069         htab_map_free(map);
2070 }
2071
2072 /* only called from syscall */
2073 int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
2074 {
2075         void **ptr;
2076         int ret = 0;
2077
2078         if (!map->ops->map_fd_sys_lookup_elem)
2079                 return -ENOTSUPP;
2080
2081         rcu_read_lock();
2082         ptr = htab_map_lookup_elem(map, key);
2083         if (ptr)
2084                 *value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr));
2085         else
2086                 ret = -ENOENT;
2087         rcu_read_unlock();
2088
2089         return ret;
2090 }
2091
2092 /* only called from syscall */
2093 int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
2094                                 void *key, void *value, u64 map_flags)
2095 {
2096         void *ptr;
2097         int ret;
2098         u32 ufd = *(u32 *)value;
2099
2100         ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
2101         if (IS_ERR(ptr))
2102                 return PTR_ERR(ptr);
2103
2104         ret = htab_map_update_elem(map, key, &ptr, map_flags);
2105         if (ret)
2106                 map->ops->map_fd_put_ptr(ptr);
2107
2108         return ret;
2109 }
2110
2111 static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
2112 {
2113         struct bpf_map *map, *inner_map_meta;
2114
2115         inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
2116         if (IS_ERR(inner_map_meta))
2117                 return inner_map_meta;
2118
2119         map = htab_map_alloc(attr);
2120         if (IS_ERR(map)) {
2121                 bpf_map_meta_free(inner_map_meta);
2122                 return map;
2123         }
2124
2125         map->inner_map_meta = inner_map_meta;
2126
2127         return map;
2128 }
2129
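/*
 * Minimal userspace sketch of setting up a hash-of-maps (illustrative only;
 * it assumes a libbpf version that provides bpf_map_create()).  The inner map
 * created first only serves as a template via opts.inner_map_fd, and per
 * fd_htab_map_alloc_check() the outer value size must be sizeof(__u32), i.e.
 * a map fd:
 *
 *	LIBBPF_OPTS(bpf_map_create_opts, opts);
 *	int inner_fd, outer_fd;
 *	__u32 key = 1;
 *
 *	inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "inner",
 *				  sizeof(__u32), sizeof(__u64), 16, NULL);
 *	opts.inner_map_fd = inner_fd;
 *	outer_fd = bpf_map_create(BPF_MAP_TYPE_HASH_OF_MAPS, "outer",
 *				  sizeof(__u32), sizeof(__u32), 64, &opts);
 *	bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_ANY);
 */
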
2130 static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
2131 {
2132         struct bpf_map **inner_map  = htab_map_lookup_elem(map, key);
2133
2134         if (!inner_map)
2135                 return NULL;
2136
2137         return READ_ONCE(*inner_map);
2138 }
2139
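/* Inline the outer-map lookup for BPF programs: on a hit the element's value
 * holds the inner map pointer, which is loaded directly instead of calling
 * htab_of_map_lookup_elem().
 */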
2140 static int htab_of_map_gen_lookup(struct bpf_map *map,
2141                                   struct bpf_insn *insn_buf)
2142 {
2143         struct bpf_insn *insn = insn_buf;
2144         const int ret = BPF_REG_0;
2145
2146         BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
2147                      (void *(*)(struct bpf_map *map, void *key))NULL));
2148         *insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
2149         *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
2150         *insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
2151                                 offsetof(struct htab_elem, key) +
2152                                 round_up(map->key_size, 8));
2153         *insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
2154
2155         return insn - insn_buf;
2156 }
2157
2158 static void htab_of_map_free(struct bpf_map *map)
2159 {
2160         bpf_map_meta_free(map->inner_map_meta);
2161         fd_htab_map_free(map);
2162 }
2163
2164 static int htab_of_maps_map_btf_id;
2165 const struct bpf_map_ops htab_of_maps_map_ops = {
2166         .map_alloc_check = fd_htab_map_alloc_check,
2167         .map_alloc = htab_of_map_alloc,
2168         .map_free = htab_of_map_free,
2169         .map_get_next_key = htab_map_get_next_key,
2170         .map_lookup_elem = htab_of_map_lookup_elem,
2171         .map_delete_elem = htab_map_delete_elem,
2172         .map_fd_get_ptr = bpf_map_fd_get_ptr,
2173         .map_fd_put_ptr = bpf_map_fd_put_ptr,
2174         .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
2175         .map_gen_lookup = htab_of_map_gen_lookup,
2176         .map_check_btf = map_check_no_btf,
2177         .map_btf_name = "bpf_htab",
2178         .map_btf_id = &htab_of_maps_map_btf_id,
2179 };