bpf: Set map_btf_{name, id} for all map types
kernel/bpf/hashtab.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  * Copyright (c) 2016 Facebook
4  */
5 #include <linux/bpf.h>
6 #include <linux/btf.h>
7 #include <linux/jhash.h>
8 #include <linux/filter.h>
9 #include <linux/rculist_nulls.h>
10 #include <linux/random.h>
11 #include <uapi/linux/btf.h>
12 #include "percpu_freelist.h"
13 #include "bpf_lru_list.h"
14 #include "map_in_map.h"
15
16 #define HTAB_CREATE_FLAG_MASK                                           \
17         (BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE |    \
18          BPF_F_ACCESS_MASK | BPF_F_ZERO_SEED)
19
20 #define BATCH_OPS(_name)                        \
21         .map_lookup_batch =                     \
22         _name##_map_lookup_batch,               \
23         .map_lookup_and_delete_batch =          \
24         _name##_map_lookup_and_delete_batch,    \
25         .map_update_batch =                     \
26         generic_map_update_batch,               \
27         .map_delete_batch =                     \
28         generic_map_delete_batch
29
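/*
 * Mechanically, BATCH_OPS(htab_percpu) expands to the following ops
 * initializers; the lookup variants are per map type while update and
 * delete use the generic implementations:
 *
 *   .map_lookup_batch = htab_percpu_map_lookup_batch,
 *   .map_lookup_and_delete_batch = htab_percpu_map_lookup_and_delete_batch,
 *   .map_update_batch = generic_map_update_batch,
 *   .map_delete_batch = generic_map_delete_batch
 */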
30 /*
31  * The bucket lock has two protection scopes:
32  *
33  * 1) Serializing concurrent operations from BPF programs on different
34  *    CPUs
35  *
36  * 2) Serializing concurrent operations from BPF programs and sys_bpf()
37  *
38  * BPF programs can execute in any context, including perf, kprobes and
39  * tracing. As there are almost no limits on where perf, kprobes and
40  * tracing can be invoked from, the lock operations need to be protected
41  * against deadlocks. Deadlocks can be caused by recursion and by an
42  * invocation in the lock held section when functions which acquire this
43  * lock are invoked from sys_bpf(). BPF recursion is prevented by
44  * incrementing the per CPU variable bpf_prog_active, which prevents BPF
45  * programs attached to perf events, kprobes and tracing from being
46  * invoked before the prior invocation from one of these contexts has
47  * completed. sys_bpf() uses the same mechanism by pinning the task to
48  * the current CPU and incrementing the recursion protection across the map operation.
49  *
50  * This has subtle implications on PREEMPT_RT. PREEMPT_RT forbids certain
51  * operations like memory allocations (even with GFP_ATOMIC) from atomic
52  * contexts. This is required because even with GFP_ATOMIC the memory
53  * allocator calls into code paths which acquire locks with long held lock
54  * sections. To ensure deterministic behaviour, these locks are regular
55  * spinlocks, which are converted to 'sleepable' spinlocks on RT. The only
56  * true atomic contexts on an RT kernel are the low level hardware
57  * handling, scheduling, low level interrupt handling, NMIs etc. None of
58  * these contexts should ever do memory allocations.
59  *
60  * As regular device interrupt handlers and soft interrupts are forced into
61  * thread context, the existing code which does
62  *   spin_lock*(); alloc(GFP_ATOMIC); spin_unlock*();
63  * just works.
64  *
65  * In theory the BPF locks could be converted to regular spinlocks as well,
66  * but the bucket locks and percpu_freelist locks can be taken from
67  * arbitrary contexts (perf, kprobes, tracepoints) which are required to be
68  * atomic contexts even on RT. These mechanisms require preallocated maps,
69  * so there is no need to invoke memory allocations within the lock held
70  * sections.
71  *
72  * BPF maps which need dynamic allocation are only used from (forced)
73  * thread context on RT and can therefore use regular spinlocks, which
74  * in turn allows memory allocations from the lock held section.
75  *
76  * On a non-RT kernel this distinction is neither possible nor required.
77  * spinlock maps to raw_spinlock and the extra code is optimized out by the
78  * compiler.
79  */
80 struct bucket {
81         struct hlist_nulls_head head;
82         union {
83                 raw_spinlock_t raw_lock;
84                 spinlock_t     lock;
85         };
86 };
87
88 struct bpf_htab {
89         struct bpf_map map;
90         struct bucket *buckets;
91         void *elems;
92         union {
93                 struct pcpu_freelist freelist;
94                 struct bpf_lru lru;
95         };
96         struct htab_elem *__percpu *extra_elems;
97         atomic_t count; /* number of elements in this hashtable */
98         u32 n_buckets;  /* number of hash buckets */
99         u32 elem_size;  /* size of each element in bytes */
100         u32 hashrnd;
101 };
102
103 /* each htab element is struct htab_elem + key + value */
104 struct htab_elem {
105         union {
106                 struct hlist_nulls_node hash_node;
107                 struct {
108                         void *padding;
109                         union {
110                                 struct bpf_htab *htab;
111                                 struct pcpu_freelist_node fnode;
112                                 struct htab_elem *batch_flink;
113                         };
114                 };
115         };
116         union {
117                 struct rcu_head rcu;
118                 struct bpf_lru_node lru_node;
119         };
120         u32 hash;
121         char key[] __aligned(8);
122 };
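/*
 * Layout sketch of one element (illustrative, 64-bit, regular hash map):
 * the key starts at offsetof(struct htab_elem, key) and the value follows
 * at key + round_up(key_size, 8), which is why lookups return
 * l->key + round_up(map->key_size, 8). E.g. key_size = 4, value_size = 6:
 *
 *   [ struct htab_elem | key (4) + pad (4) | value (6) + pad (2) ]
 *
 * For per-cpu maps, the slot after the key instead holds a void __percpu
 * pointer to the per-cpu values (see htab_elem_{set,get}_ptr() below).
 */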
123
124 static inline bool htab_is_prealloc(const struct bpf_htab *htab)
125 {
126         return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
127 }
128
129 static inline bool htab_use_raw_lock(const struct bpf_htab *htab)
130 {
131         return (!IS_ENABLED(CONFIG_PREEMPT_RT) || htab_is_prealloc(htab));
132 }
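/*
 * Lock choice summary for the above (illustrative):
 *
 *   CONFIG_PREEMPT_RT  preallocated map  lock used
 *   no                 either            raw_spinlock_t
 *   yes                yes               raw_spinlock_t
 *   yes                no                spinlock_t (sleeping lock on RT)
 */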
133
134 static void htab_init_buckets(struct bpf_htab *htab)
135 {
136         unsigned int i;
137
138         for (i = 0; i < htab->n_buckets; i++) {
139                 INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
140                 if (htab_use_raw_lock(htab))
141                         raw_spin_lock_init(&htab->buckets[i].raw_lock);
142                 else
143                         spin_lock_init(&htab->buckets[i].lock);
144         }
145 }
146
147 static inline unsigned long htab_lock_bucket(const struct bpf_htab *htab,
148                                              struct bucket *b)
149 {
150         unsigned long flags;
151
152         if (htab_use_raw_lock(htab))
153                 raw_spin_lock_irqsave(&b->raw_lock, flags);
154         else
155                 spin_lock_irqsave(&b->lock, flags);
156         return flags;
157 }
158
159 static inline void htab_unlock_bucket(const struct bpf_htab *htab,
160                                       struct bucket *b,
161                                       unsigned long flags)
162 {
163         if (htab_use_raw_lock(htab))
164                 raw_spin_unlock_irqrestore(&b->raw_lock, flags);
165         else
166                 spin_unlock_irqrestore(&b->lock, flags);
167 }
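/*
 * Typical usage pattern of the two helpers above elsewhere in this file
 * (sketch):
 *
 *   struct bucket *b = __select_bucket(htab, hash);
 *   unsigned long flags = htab_lock_bucket(htab, b);
 *
 *   ... add/remove elements on b->head ...
 *
 *   htab_unlock_bucket(htab, b, flags);
 */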
168
169 static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);
170
171 static bool htab_is_lru(const struct bpf_htab *htab)
172 {
173         return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
174                 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
175 }
176
177 static bool htab_is_percpu(const struct bpf_htab *htab)
178 {
179         return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
180                 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
181 }
182
183 static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
184                                      void __percpu *pptr)
185 {
186         *(void __percpu **)(l->key + key_size) = pptr;
187 }
188
189 static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
190 {
191         return *(void __percpu **)(l->key + key_size);
192 }
193
194 static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
195 {
196         return *(void **)(l->key + roundup(map->key_size, 8));
197 }
198
199 static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
200 {
201         return (struct htab_elem *) (htab->elems + i * htab->elem_size);
202 }
203
204 static void htab_free_elems(struct bpf_htab *htab)
205 {
206         int i;
207
208         if (!htab_is_percpu(htab))
209                 goto free_elems;
210
211         for (i = 0; i < htab->map.max_entries; i++) {
212                 void __percpu *pptr;
213
214                 pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
215                                          htab->map.key_size);
216                 free_percpu(pptr);
217                 cond_resched();
218         }
219 free_elems:
220         bpf_map_area_free(htab->elems);
221 }
222
223 /* The LRU list has a lock (lru_lock). Each htab bucket has a lock
224  * (bucket_lock). If both locks need to be acquired together, the lock
225  * order is always lru_lock -> bucket_lock and this only happens in
226  * bpf_lru_list.c logic. For example, certain code paths of
227  * bpf_lru_pop_free(), which is called by prealloc_lru_pop(),
228  * will acquire lru_lock first followed by acquiring bucket_lock.
229  *
230  * In hashtab.c, to avoid deadlock, lock acquisition of
231  * bucket_lock followed by lru_lock is not allowed. In such cases,
232  * bucket_lock needs to be released first before acquiring lru_lock.
233  */
234 static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
235                                           u32 hash)
236 {
237         struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
238         struct htab_elem *l;
239
240         if (node) {
241                 l = container_of(node, struct htab_elem, lru_node);
242                 memcpy(l->key, key, htab->map.key_size);
243                 return l;
244         }
245
246         return NULL;
247 }
248
249 static int prealloc_init(struct bpf_htab *htab)
250 {
251         u32 num_entries = htab->map.max_entries;
252         int err = -ENOMEM, i;
253
254         if (!htab_is_percpu(htab) && !htab_is_lru(htab))
255                 num_entries += num_possible_cpus();
256
257         htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries,
258                                          htab->map.numa_node);
259         if (!htab->elems)
260                 return -ENOMEM;
261
262         if (!htab_is_percpu(htab))
263                 goto skip_percpu_elems;
264
265         for (i = 0; i < num_entries; i++) {
266                 u32 size = round_up(htab->map.value_size, 8);
267                 void __percpu *pptr;
268
269                 pptr = __alloc_percpu_gfp(size, 8, GFP_USER | __GFP_NOWARN);
270                 if (!pptr)
271                         goto free_elems;
272                 htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
273                                   pptr);
274                 cond_resched();
275         }
276
277 skip_percpu_elems:
278         if (htab_is_lru(htab))
279                 err = bpf_lru_init(&htab->lru,
280                                    htab->map.map_flags & BPF_F_NO_COMMON_LRU,
281                                    offsetof(struct htab_elem, hash) -
282                                    offsetof(struct htab_elem, lru_node),
283                                    htab_lru_map_delete_node,
284                                    htab);
285         else
286                 err = pcpu_freelist_init(&htab->freelist);
287
288         if (err)
289                 goto free_elems;
290
291         if (htab_is_lru(htab))
292                 bpf_lru_populate(&htab->lru, htab->elems,
293                                  offsetof(struct htab_elem, lru_node),
294                                  htab->elem_size, num_entries);
295         else
296                 pcpu_freelist_populate(&htab->freelist,
297                                        htab->elems + offsetof(struct htab_elem, fnode),
298                                        htab->elem_size, num_entries);
299
300         return 0;
301
302 free_elems:
303         htab_free_elems(htab);
304         return err;
305 }
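/*
 * Worked example (hypothetical numbers): a plain BPF_MAP_TYPE_HASH with
 * max_entries = 100 on a system with 4 possible CPUs preallocates
 * 100 + 4 = 104 elements. The 4 extra elements back alloc_extra_elems()
 * below, so an update that replaces an existing element never needs to
 * pop/push the freelist. Per-cpu and LRU maps skip the extra elements.
 */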
306
307 static void prealloc_destroy(struct bpf_htab *htab)
308 {
309         htab_free_elems(htab);
310
311         if (htab_is_lru(htab))
312                 bpf_lru_destroy(&htab->lru);
313         else
314                 pcpu_freelist_destroy(&htab->freelist);
315 }
316
317 static int alloc_extra_elems(struct bpf_htab *htab)
318 {
319         struct htab_elem *__percpu *pptr, *l_new;
320         struct pcpu_freelist_node *l;
321         int cpu;
322
323         pptr = __alloc_percpu_gfp(sizeof(struct htab_elem *), 8,
324                                   GFP_USER | __GFP_NOWARN);
325         if (!pptr)
326                 return -ENOMEM;
327
328         for_each_possible_cpu(cpu) {
329                 l = pcpu_freelist_pop(&htab->freelist);
330                 /* pop will succeed, since prealloc_init()
331                  * preallocated num_possible_cpus() extra elements
332                  */
333                 l_new = container_of(l, struct htab_elem, fnode);
334                 *per_cpu_ptr(pptr, cpu) = l_new;
335         }
336         htab->extra_elems = pptr;
337         return 0;
338 }
339
340 /* Called from syscall */
341 static int htab_map_alloc_check(union bpf_attr *attr)
342 {
343         bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
344                        attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
345         bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
346                     attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
347         /* percpu_lru means each cpu has its own LRU list.
348          * It is different from BPF_MAP_TYPE_PERCPU_HASH, where
349          * the map's value itself is percpu.  percpu_lru has
350          * nothing to do with the map's value.
351          */
352         bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
353         bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
354         bool zero_seed = (attr->map_flags & BPF_F_ZERO_SEED);
355         int numa_node = bpf_map_attr_numa_node(attr);
356
357         BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
358                      offsetof(struct htab_elem, hash_node.pprev));
359         BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
360                      offsetof(struct htab_elem, hash_node.pprev));
361
362         if (lru && !bpf_capable())
363                 /* The LRU implementation is much more complicated than
364                  * that of other maps.  Hence, limit it to CAP_BPF.
365                  */
366                 return -EPERM;
367
368         if (zero_seed && !capable(CAP_SYS_ADMIN))
369                 /* Guard against local DoS, and discourage production use. */
370                 return -EPERM;
371
372         if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK ||
373             !bpf_map_flags_access_ok(attr->map_flags))
374                 return -EINVAL;
375
376         if (!lru && percpu_lru)
377                 return -EINVAL;
378
379         if (lru && !prealloc)
380                 return -ENOTSUPP;
381
382         if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
383                 return -EINVAL;
384
385         /* check sanity of attributes.
386          * value_size == 0 may be allowed in the future to use map as a set
387          */
388         if (attr->max_entries == 0 || attr->key_size == 0 ||
389             attr->value_size == 0)
390                 return -EINVAL;
391
392         if (attr->key_size > MAX_BPF_STACK)
393                 /* eBPF programs initialize keys on stack, so they cannot be
394                  * larger than max stack size
395                  */
396                 return -E2BIG;
397
398         if (attr->value_size >= KMALLOC_MAX_SIZE -
399             MAX_BPF_STACK - sizeof(struct htab_elem))
400                 /* if value_size is bigger, the user space won't be able to
401                  * access the elements via bpf syscall. This check also makes
402                  * sure that the elem_size doesn't overflow and it's
403                  * kmalloc-able later in htab_map_update_elem()
404                  */
405                 return -E2BIG;
406
407         return 0;
408 }
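/*
 * Userspace sketch of how these checks surface (assumes libbpf >= 0.7,
 * which provides bpf_map_create(); not part of this file):
 *
 *   #include <bpf/bpf.h>
 *   #include <errno.h>
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *           // key_size == 0 is rejected by htab_map_alloc_check()
 *           int fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, 0, 8, 16, NULL);
 *
 *           if (fd < 0)
 *                   printf("map create failed: %d (expecting EINVAL)\n", errno);
 *           return 0;
 *   }
 */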
409
410 static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
411 {
412         bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
413                        attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
414         bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
415                     attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
416         /* percpu_lru means each cpu has its own LRU list.
417          * It is different from BPF_MAP_TYPE_PERCPU_HASH, where
418          * the map's value itself is percpu.  percpu_lru has
419          * nothing to do with the map's value.
420          */
421         bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
422         bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
423         struct bpf_htab *htab;
424         u64 cost;
425         int err;
426
427         htab = kzalloc(sizeof(*htab), GFP_USER);
428         if (!htab)
429                 return ERR_PTR(-ENOMEM);
430
431         bpf_map_init_from_attr(&htab->map, attr);
432
433         if (percpu_lru) {
434                 /* ensure each CPU's lru list has >= 1 element.
435                  * While we are at it, make each lru list have the same
436                  * number of elements.
437                  */
438                 htab->map.max_entries = roundup(attr->max_entries,
439                                                 num_possible_cpus());
440                 if (htab->map.max_entries < attr->max_entries)
441                         htab->map.max_entries = rounddown(attr->max_entries,
442                                                           num_possible_cpus());
443         }
444
445         /* hash table size must be power of 2 */
446         htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
447
448         htab->elem_size = sizeof(struct htab_elem) +
449                           round_up(htab->map.key_size, 8);
450         if (percpu)
451                 htab->elem_size += sizeof(void *);
452         else
453                 htab->elem_size += round_up(htab->map.value_size, 8);
454
455         err = -E2BIG;
456         /* prevent zero size kmalloc and check for u32 overflow */
457         if (htab->n_buckets == 0 ||
458             htab->n_buckets > U32_MAX / sizeof(struct bucket))
459                 goto free_htab;
460
461         cost = (u64) htab->n_buckets * sizeof(struct bucket) +
462                (u64) htab->elem_size * htab->map.max_entries;
463
464         if (percpu)
465                 cost += (u64) round_up(htab->map.value_size, 8) *
466                         num_possible_cpus() * htab->map.max_entries;
467         else
468                 cost += (u64) htab->elem_size * num_possible_cpus();
469
470         /* if map size is larger than memlock limit, reject it */
471         err = bpf_map_charge_init(&htab->map.memory, cost);
472         if (err)
473                 goto free_htab;
474
475         err = -ENOMEM;
476         htab->buckets = bpf_map_area_alloc(htab->n_buckets *
477                                            sizeof(struct bucket),
478                                            htab->map.numa_node);
479         if (!htab->buckets)
480                 goto free_charge;
481
482         if (htab->map.map_flags & BPF_F_ZERO_SEED)
483                 htab->hashrnd = 0;
484         else
485                 htab->hashrnd = get_random_int();
486
487         htab_init_buckets(htab);
488
489         if (prealloc) {
490                 err = prealloc_init(htab);
491                 if (err)
492                         goto free_buckets;
493
494                 if (!percpu && !lru) {
495                         /* lru itself can remove the least used element, so
496                          * there is no need for an extra elem during map_update.
497                          */
498                         err = alloc_extra_elems(htab);
499                         if (err)
500                                 goto free_prealloc;
501                 }
502         }
503
504         return &htab->map;
505
506 free_prealloc:
507         prealloc_destroy(htab);
508 free_buckets:
509         bpf_map_area_free(htab->buckets);
510 free_charge:
511         bpf_map_charge_finish(&htab->map.memory);
512 free_htab:
513         kfree(htab);
514         return ERR_PTR(err);
515 }
516
517 static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd)
518 {
519         return jhash(key, key_len, hashrnd);
520 }
521
522 static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
523 {
524         return &htab->buckets[hash & (htab->n_buckets - 1)];
525 }
526
527 static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
528 {
529         return &__select_bucket(htab, hash)->head;
530 }
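/*
 * Because n_buckets is always a power of two, "hash & (n_buckets - 1)"
 * is the cheap equivalent of "hash % n_buckets". E.g. with n_buckets = 8,
 * hash = 45 (0b101101) selects bucket 45 & 7 = 5.
 */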
531
532 /* this lookup function can only be called with bucket lock taken */
533 static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
534                                          void *key, u32 key_size)
535 {
536         struct hlist_nulls_node *n;
537         struct htab_elem *l;
538
539         hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
540                 if (l->hash == hash && !memcmp(&l->key, key, key_size))
541                         return l;
542
543         return NULL;
544 }
545
546 /* can be called without the bucket lock. It will repeat the loop in
547  * the unlikely event that elements moved from one bucket into another
548  * while the linked list is being walked
549  */
550 static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
551                                                u32 hash, void *key,
552                                                u32 key_size, u32 n_buckets)
553 {
554         struct hlist_nulls_node *n;
555         struct htab_elem *l;
556
557 again:
558         hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
559                 if (l->hash == hash && !memcmp(&l->key, key, key_size))
560                         return l;
561
562         if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
563                 goto again;
564
565         return NULL;
566 }
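/*
 * Why the nulls check above is sufficient: each bucket's list is
 * terminated by a nulls marker encoding the bucket index (see
 * htab_init_buckets(), which passes 'i' to INIT_HLIST_NULLS_HEAD()). If
 * an element is freed and recycled into another bucket while the list is
 * walked locklessly, the walk may end on the wrong bucket's marker; the
 * mismatch against "hash & (n_buckets - 1)" detects that and restarts.
 */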
567
568 /* Called from syscall or from eBPF program directly, so
569  * arguments have to match bpf_map_lookup_elem() exactly.
570  * The return value is adjusted by BPF instructions
571  * in htab_map_gen_lookup().
572  */
573 static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
574 {
575         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
576         struct hlist_nulls_head *head;
577         struct htab_elem *l;
578         u32 hash, key_size;
579
580         /* Must be called with rcu_read_lock. */
581         WARN_ON_ONCE(!rcu_read_lock_held());
582
583         key_size = map->key_size;
584
585         hash = htab_map_hash(key, key_size, htab->hashrnd);
586
587         head = select_bucket(htab, hash);
588
589         l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
590
591         return l;
592 }
593
594 static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
595 {
596         struct htab_elem *l = __htab_map_lookup_elem(map, key);
597
598         if (l)
599                 return l->key + round_up(map->key_size, 8);
600
601         return NULL;
602 }
603
604 /* inline bpf_map_lookup_elem() call.
605  * Instead of:
606  * bpf_prog
607  *   bpf_map_lookup_elem
608  *     map->ops->map_lookup_elem
609  *       htab_map_lookup_elem
610  *         __htab_map_lookup_elem
611  * do:
612  * bpf_prog
613  *   __htab_map_lookup_elem
614  */
615 static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
616 {
617         struct bpf_insn *insn = insn_buf;
618         const int ret = BPF_REG_0;
619
620         BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
621                      (void *(*)(struct bpf_map *map, void *key))NULL));
622         *insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
623         *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
624         *insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
625                                 offsetof(struct htab_elem, key) +
626                                 round_up(map->key_size, 8));
627         return insn - insn_buf;
628 }
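/*
 * In pseudo-asm, the emitted sequence is (illustrative):
 *
 *   call __htab_map_lookup_elem           // R0 = elem or NULL
 *   if R0 == 0 goto +1                    // miss: leave R0 == NULL
 *   R0 += offsetof(htab_elem, key) + round_up(key_size, 8)  // R0 = value
 */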
629
630 static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
631                                                         void *key, const bool mark)
632 {
633         struct htab_elem *l = __htab_map_lookup_elem(map, key);
634
635         if (l) {
636                 if (mark)
637                         bpf_lru_node_set_ref(&l->lru_node);
638                 return l->key + round_up(map->key_size, 8);
639         }
640
641         return NULL;
642 }
643
644 static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
645 {
646         return __htab_lru_map_lookup_elem(map, key, true);
647 }
648
649 static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
650 {
651         return __htab_lru_map_lookup_elem(map, key, false);
652 }
653
654 static u32 htab_lru_map_gen_lookup(struct bpf_map *map,
655                                    struct bpf_insn *insn_buf)
656 {
657         struct bpf_insn *insn = insn_buf;
658         const int ret = BPF_REG_0;
659         const int ref_reg = BPF_REG_1;
660
661         BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
662                      (void *(*)(struct bpf_map *map, void *key))NULL));
663         *insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
664         *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4);
665         *insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret,
666                               offsetof(struct htab_elem, lru_node) +
667                               offsetof(struct bpf_lru_node, ref));
668         *insn++ = BPF_JMP_IMM(BPF_JNE, ref_reg, 0, 1);
669         *insn++ = BPF_ST_MEM(BPF_B, ret,
670                              offsetof(struct htab_elem, lru_node) +
671                              offsetof(struct bpf_lru_node, ref),
672                              1);
673         *insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
674                                 offsetof(struct htab_elem, key) +
675                                 round_up(map->key_size, 8));
676         return insn - insn_buf;
677 }
678
679 /* It is called from the bpf_lru_list when the LRU needs to delete
680  * older elements from the htab.
681  */
682 static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
683 {
684         struct bpf_htab *htab = (struct bpf_htab *)arg;
685         struct htab_elem *l = NULL, *tgt_l;
686         struct hlist_nulls_head *head;
687         struct hlist_nulls_node *n;
688         unsigned long flags;
689         struct bucket *b;
690
691         tgt_l = container_of(node, struct htab_elem, lru_node);
692         b = __select_bucket(htab, tgt_l->hash);
693         head = &b->head;
694
695         flags = htab_lock_bucket(htab, b);
696
697         hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
698                 if (l == tgt_l) {
699                         hlist_nulls_del_rcu(&l->hash_node);
700                         break;
701                 }
702
703         htab_unlock_bucket(htab, b, flags);
704
705         return l == tgt_l;
706 }
707
708 /* Called from syscall */
709 static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
710 {
711         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
712         struct hlist_nulls_head *head;
713         struct htab_elem *l, *next_l;
714         u32 hash, key_size;
715         int i = 0;
716
717         WARN_ON_ONCE(!rcu_read_lock_held());
718
719         key_size = map->key_size;
720
721         if (!key)
722                 goto find_first_elem;
723
724         hash = htab_map_hash(key, key_size, htab->hashrnd);
725
726         head = select_bucket(htab, hash);
727
728         /* lookup the key */
729         l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
730
731         if (!l)
732                 goto find_first_elem;
733
734         /* key was found, get next key in the same bucket */
735         next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
736                                   struct htab_elem, hash_node);
737
738         if (next_l) {
739                 /* if next elem in this hash list is non-zero, just return it */
740                 memcpy(next_key, next_l->key, key_size);
741                 return 0;
742         }
743
744         /* no more elements in this hash list, go to the next bucket */
745         i = hash & (htab->n_buckets - 1);
746         i++;
747
748 find_first_elem:
749         /* iterate over buckets */
750         for (; i < htab->n_buckets; i++) {
751                 head = select_bucket(htab, i);
752
753                 /* pick first element in the bucket */
754                 next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
755                                           struct htab_elem, hash_node);
756                 if (next_l) {
757                         /* if it's not empty, just return it */
758                         memcpy(next_key, next_l->key, key_size);
759                         return 0;
760                 }
761         }
762
763         /* iterated over all buckets and all elements */
764         return -ENOENT;
765 }
766
767 static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
768 {
769         if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
770                 free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
771         kfree(l);
772 }
773
774 static void htab_elem_free_rcu(struct rcu_head *head)
775 {
776         struct htab_elem *l = container_of(head, struct htab_elem, rcu);
777         struct bpf_htab *htab = l->htab;
778
779         htab_elem_free(htab, l);
780 }
781
782 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
783 {
784         struct bpf_map *map = &htab->map;
785
786         if (map->ops->map_fd_put_ptr) {
787                 void *ptr = fd_htab_map_get_ptr(map, l);
788
789                 map->ops->map_fd_put_ptr(ptr);
790         }
791
792         if (htab_is_prealloc(htab)) {
793                 __pcpu_freelist_push(&htab->freelist, &l->fnode);
794         } else {
795                 atomic_dec(&htab->count);
796                 l->htab = htab;
797                 call_rcu(&l->rcu, htab_elem_free_rcu);
798         }
799 }
800
801 static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
802                             void *value, bool onallcpus)
803 {
804         if (!onallcpus) {
805                 /* copy true value_size bytes */
806                 memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
807         } else {
808                 u32 size = round_up(htab->map.value_size, 8);
809                 int off = 0, cpu;
810
811                 for_each_possible_cpu(cpu) {
812                         bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
813                                         value + off, size);
814                         off += size;
815                 }
816         }
817 }
818
819 static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
820 {
821         return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
822                BITS_PER_LONG == 64;
823 }
824
825 static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
826                                          void *value, u32 key_size, u32 hash,
827                                          bool percpu, bool onallcpus,
828                                          struct htab_elem *old_elem)
829 {
830         u32 size = htab->map.value_size;
831         bool prealloc = htab_is_prealloc(htab);
832         struct htab_elem *l_new, **pl_new;
833         void __percpu *pptr;
834
835         if (prealloc) {
836                 if (old_elem) {
837                         /* if we're updating the existing element,
838                          * use per-cpu extra elems to avoid freelist_pop/push
839                          */
840                         pl_new = this_cpu_ptr(htab->extra_elems);
841                         l_new = *pl_new;
842                         *pl_new = old_elem;
843                 } else {
844                         struct pcpu_freelist_node *l;
845
846                         l = __pcpu_freelist_pop(&htab->freelist);
847                         if (!l)
848                                 return ERR_PTR(-E2BIG);
849                         l_new = container_of(l, struct htab_elem, fnode);
850                 }
851         } else {
852                 if (atomic_inc_return(&htab->count) > htab->map.max_entries)
853                         if (!old_elem) {
854                                 /* when map is full and update() is replacing
855                                  * old element, it's ok to allocate, since
856                                  * old element will be freed immediately.
857                                  * Otherwise return an error
858                                  */
859                                 l_new = ERR_PTR(-E2BIG);
860                                 goto dec_count;
861                         }
862                 l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
863                                      htab->map.numa_node);
864                 if (!l_new) {
865                         l_new = ERR_PTR(-ENOMEM);
866                         goto dec_count;
867                 }
868                 check_and_init_map_lock(&htab->map,
869                                         l_new->key + round_up(key_size, 8));
870         }
871
872         memcpy(l_new->key, key, key_size);
873         if (percpu) {
874                 size = round_up(size, 8);
875                 if (prealloc) {
876                         pptr = htab_elem_get_ptr(l_new, key_size);
877                 } else {
878                         /* alloc_percpu zero-fills */
879                         pptr = __alloc_percpu_gfp(size, 8,
880                                                   GFP_ATOMIC | __GFP_NOWARN);
881                         if (!pptr) {
882                                 kfree(l_new);
883                                 l_new = ERR_PTR(-ENOMEM);
884                                 goto dec_count;
885                         }
886                 }
887
888                 pcpu_copy_value(htab, pptr, value, onallcpus);
889
890                 if (!prealloc)
891                         htab_elem_set_ptr(l_new, key_size, pptr);
892         } else if (fd_htab_map_needs_adjust(htab)) {
893                 size = round_up(size, 8);
894                 memcpy(l_new->key + round_up(key_size, 8), value, size);
895         } else {
896                 copy_map_value(&htab->map,
897                                l_new->key + round_up(key_size, 8),
898                                value);
899         }
900
901         l_new->hash = hash;
902         return l_new;
903 dec_count:
904         atomic_dec(&htab->count);
905         return l_new;
906 }
907
908 static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
909                        u64 map_flags)
910 {
911         if (l_old && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
912                 /* elem already exists */
913                 return -EEXIST;
914
915         if (!l_old && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
916                 /* elem doesn't exist, cannot update it */
917                 return -ENOENT;
918
919         return 0;
920 }
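/*
 * From userspace these map to the standard bpf(2) update flags (sketch):
 *
 *   bpf_map_update_elem(fd, &key, &val, BPF_NOEXIST); // -EEXIST if present
 *   bpf_map_update_elem(fd, &key, &val, BPF_EXIST);   // -ENOENT if absent
 *   bpf_map_update_elem(fd, &key, &val, BPF_ANY);     // insert or overwrite
 */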
921
922 /* Called from syscall or from eBPF program */
923 static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
924                                 u64 map_flags)
925 {
926         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
927         struct htab_elem *l_new = NULL, *l_old;
928         struct hlist_nulls_head *head;
929         unsigned long flags;
930         struct bucket *b;
931         u32 key_size, hash;
932         int ret;
933
934         if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
935                 /* unknown flags */
936                 return -EINVAL;
937
938         WARN_ON_ONCE(!rcu_read_lock_held());
939
940         key_size = map->key_size;
941
942         hash = htab_map_hash(key, key_size, htab->hashrnd);
943
944         b = __select_bucket(htab, hash);
945         head = &b->head;
946
947         if (unlikely(map_flags & BPF_F_LOCK)) {
948                 if (unlikely(!map_value_has_spin_lock(map)))
949                         return -EINVAL;
950                 /* find an element without taking the bucket lock */
951                 l_old = lookup_nulls_elem_raw(head, hash, key, key_size,
952                                               htab->n_buckets);
953                 ret = check_flags(htab, l_old, map_flags);
954                 if (ret)
955                         return ret;
956                 if (l_old) {
957                         /* grab the element lock and update value in place */
958                         copy_map_value_locked(map,
959                                               l_old->key + round_up(key_size, 8),
960                                               value, false);
961                         return 0;
962                 }
963                 /* fall through, grab the bucket lock and lookup again.
964                  * 99.9% chance that the element won't be found,
965                  * but second lookup under lock has to be done.
966                  */
967         }
968
969         flags = htab_lock_bucket(htab, b);
970
971         l_old = lookup_elem_raw(head, hash, key, key_size);
972
973         ret = check_flags(htab, l_old, map_flags);
974         if (ret)
975                 goto err;
976
977         if (unlikely(l_old && (map_flags & BPF_F_LOCK))) {
978                 /* first lookup without the bucket lock didn't find the element,
979                  * but second lookup with the bucket lock found it.
980                  * This case is highly unlikely, but has to be dealt with:
981                  * grab the element lock in addition to the bucket lock
982                  * and update element in place
983                  */
984                 copy_map_value_locked(map,
985                                       l_old->key + round_up(key_size, 8),
986                                       value, false);
987                 ret = 0;
988                 goto err;
989         }
990
991         l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
992                                 l_old);
993         if (IS_ERR(l_new)) {
994                 /* all pre-allocated elements are in use or memory exhausted */
995                 ret = PTR_ERR(l_new);
996                 goto err;
997         }
998
999         /* add new element to the head of the list, so that
1000          * concurrent search will find it before old elem
1001          */
1002         hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1003         if (l_old) {
1004                 hlist_nulls_del_rcu(&l_old->hash_node);
1005                 if (!htab_is_prealloc(htab))
1006                         free_htab_elem(htab, l_old);
1007         }
1008         ret = 0;
1009 err:
1010         htab_unlock_bucket(htab, b, flags);
1011         return ret;
1012 }
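/*
 * BPF_F_LOCK usage sketch (assumes the map value embeds a struct
 * bpf_spin_lock and the map was created with BTF; illustrative only):
 *
 *   struct val {
 *           struct bpf_spin_lock lock;
 *           __u64 counter;
 *   };
 *
 *   struct val v = { .counter = 42 };
 *
 *   // updates 'counter' in place under v's spin lock, per the fast
 *   // path above that avoids taking the bucket lock
 *   bpf_map_update_elem(fd, &key, &v, BPF_F_LOCK);
 */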
1013
1014 static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
1015                                     u64 map_flags)
1016 {
1017         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1018         struct htab_elem *l_new, *l_old = NULL;
1019         struct hlist_nulls_head *head;
1020         unsigned long flags;
1021         struct bucket *b;
1022         u32 key_size, hash;
1023         int ret;
1024
1025         if (unlikely(map_flags > BPF_EXIST))
1026                 /* unknown flags */
1027                 return -EINVAL;
1028
1029         WARN_ON_ONCE(!rcu_read_lock_held());
1030
1031         key_size = map->key_size;
1032
1033         hash = htab_map_hash(key, key_size, htab->hashrnd);
1034
1035         b = __select_bucket(htab, hash);
1036         head = &b->head;
1037
1038         /* For LRU, we need to alloc before taking bucket's
1039          * spinlock because getting free nodes from LRU may need
1040          * to remove older elements from htab and this removal
1041          * operation will need a bucket lock.
1042          */
1043         l_new = prealloc_lru_pop(htab, key, hash);
1044         if (!l_new)
1045                 return -ENOMEM;
1046         memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size);
1047
1048         flags = htab_lock_bucket(htab, b);
1049
1050         l_old = lookup_elem_raw(head, hash, key, key_size);
1051
1052         ret = check_flags(htab, l_old, map_flags);
1053         if (ret)
1054                 goto err;
1055
1056         /* add new element to the head of the list, so that
1057          * concurrent search will find it before old elem
1058          */
1059         hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1060         if (l_old) {
1061                 bpf_lru_node_set_ref(&l_new->lru_node);
1062                 hlist_nulls_del_rcu(&l_old->hash_node);
1063         }
1064         ret = 0;
1065
1066 err:
1067         htab_unlock_bucket(htab, b, flags);
1068
1069         if (ret)
1070                 bpf_lru_push_free(&htab->lru, &l_new->lru_node);
1071         else if (l_old)
1072                 bpf_lru_push_free(&htab->lru, &l_old->lru_node);
1073
1074         return ret;
1075 }
1076
1077 static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
1078                                          void *value, u64 map_flags,
1079                                          bool onallcpus)
1080 {
1081         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1082         struct htab_elem *l_new = NULL, *l_old;
1083         struct hlist_nulls_head *head;
1084         unsigned long flags;
1085         struct bucket *b;
1086         u32 key_size, hash;
1087         int ret;
1088
1089         if (unlikely(map_flags > BPF_EXIST))
1090                 /* unknown flags */
1091                 return -EINVAL;
1092
1093         WARN_ON_ONCE(!rcu_read_lock_held());
1094
1095         key_size = map->key_size;
1096
1097         hash = htab_map_hash(key, key_size, htab->hashrnd);
1098
1099         b = __select_bucket(htab, hash);
1100         head = &b->head;
1101
1102         flags = htab_lock_bucket(htab, b);
1103
1104         l_old = lookup_elem_raw(head, hash, key, key_size);
1105
1106         ret = check_flags(htab, l_old, map_flags);
1107         if (ret)
1108                 goto err;
1109
1110         if (l_old) {
1111                 /* per-cpu hash map can update value in-place */
1112                 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
1113                                 value, onallcpus);
1114         } else {
1115                 l_new = alloc_htab_elem(htab, key, value, key_size,
1116                                         hash, true, onallcpus, NULL);
1117                 if (IS_ERR(l_new)) {
1118                         ret = PTR_ERR(l_new);
1119                         goto err;
1120                 }
1121                 hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1122         }
1123         ret = 0;
1124 err:
1125         htab_unlock_bucket(htab, b, flags);
1126         return ret;
1127 }
1128
1129 static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
1130                                              void *value, u64 map_flags,
1131                                              bool onallcpus)
1132 {
1133         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1134         struct htab_elem *l_new = NULL, *l_old;
1135         struct hlist_nulls_head *head;
1136         unsigned long flags;
1137         struct bucket *b;
1138         u32 key_size, hash;
1139         int ret;
1140
1141         if (unlikely(map_flags > BPF_EXIST))
1142                 /* unknown flags */
1143                 return -EINVAL;
1144
1145         WARN_ON_ONCE(!rcu_read_lock_held());
1146
1147         key_size = map->key_size;
1148
1149         hash = htab_map_hash(key, key_size, htab->hashrnd);
1150
1151         b = __select_bucket(htab, hash);
1152         head = &b->head;
1153
1154         /* For LRU, we need to alloc before taking bucket's
1155          * spinlock because LRU's elem alloc may need
1156          * to remove older elem from htab and this removal
1157          * operation will need a bucket lock.
1158          */
1159         if (map_flags != BPF_EXIST) {
1160                 l_new = prealloc_lru_pop(htab, key, hash);
1161                 if (!l_new)
1162                         return -ENOMEM;
1163         }
1164
1165         flags = htab_lock_bucket(htab, b);
1166
1167         l_old = lookup_elem_raw(head, hash, key, key_size);
1168
1169         ret = check_flags(htab, l_old, map_flags);
1170         if (ret)
1171                 goto err;
1172
1173         if (l_old) {
1174                 bpf_lru_node_set_ref(&l_old->lru_node);
1175
1176                 /* per-cpu hash map can update value in-place */
1177                 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
1178                                 value, onallcpus);
1179         } else {
1180                 pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
1181                                 value, onallcpus);
1182                 hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1183                 l_new = NULL;
1184         }
1185         ret = 0;
1186 err:
1187         htab_unlock_bucket(htab, b, flags);
1188         if (l_new)
1189                 bpf_lru_push_free(&htab->lru, &l_new->lru_node);
1190         return ret;
1191 }
1192
1193 static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
1194                                        void *value, u64 map_flags)
1195 {
1196         return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
1197 }
1198
1199 static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
1200                                            void *value, u64 map_flags)
1201 {
1202         return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
1203                                                  false);
1204 }
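/*
 * Syscall-side lookups/updates on the per-cpu variants transfer one value
 * per possible CPU, each padded to 8 bytes (see pcpu_copy_value() above).
 * A userspace buffer therefore needs round_up(value_size, 8) *
 * num_possible_cpus() bytes. Sketch using libbpf helpers (assumption:
 * value_size == 8):
 *
 *   int ncpus = libbpf_num_possible_cpus();
 *   __u64 *vals = calloc(ncpus, sizeof(*vals));
 *
 *   if (vals && !bpf_map_lookup_elem(fd, &key, vals))
 *           for (int i = 0; i < ncpus; i++)
 *                   printf("cpu%d: %llu\n", i, (unsigned long long)vals[i]);
 */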
1205
1206 /* Called from syscall or from eBPF program */
1207 static int htab_map_delete_elem(struct bpf_map *map, void *key)
1208 {
1209         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1210         struct hlist_nulls_head *head;
1211         struct bucket *b;
1212         struct htab_elem *l;
1213         unsigned long flags;
1214         u32 hash, key_size;
1215         int ret = -ENOENT;
1216
1217         WARN_ON_ONCE(!rcu_read_lock_held());
1218
1219         key_size = map->key_size;
1220
1221         hash = htab_map_hash(key, key_size, htab->hashrnd);
1222         b = __select_bucket(htab, hash);
1223         head = &b->head;
1224
1225         flags = htab_lock_bucket(htab, b);
1226
1227         l = lookup_elem_raw(head, hash, key, key_size);
1228
1229         if (l) {
1230                 hlist_nulls_del_rcu(&l->hash_node);
1231                 free_htab_elem(htab, l);
1232                 ret = 0;
1233         }
1234
1235         htab_unlock_bucket(htab, b, flags);
1236         return ret;
1237 }
1238
1239 static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
1240 {
1241         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1242         struct hlist_nulls_head *head;
1243         struct bucket *b;
1244         struct htab_elem *l;
1245         unsigned long flags;
1246         u32 hash, key_size;
1247         int ret = -ENOENT;
1248
1249         WARN_ON_ONCE(!rcu_read_lock_held());
1250
1251         key_size = map->key_size;
1252
1253         hash = htab_map_hash(key, key_size, htab->hashrnd);
1254         b = __select_bucket(htab, hash);
1255         head = &b->head;
1256
1257         flags = htab_lock_bucket(htab, b);
1258
1259         l = lookup_elem_raw(head, hash, key, key_size);
1260
1261         if (l) {
1262                 hlist_nulls_del_rcu(&l->hash_node);
1263                 ret = 0;
1264         }
1265
1266         htab_unlock_bucket(htab, b, flags);
1267         if (l)
1268                 bpf_lru_push_free(&htab->lru, &l->lru_node);
1269         return ret;
1270 }
1271
1272 static void delete_all_elements(struct bpf_htab *htab)
1273 {
1274         int i;
1275
1276         for (i = 0; i < htab->n_buckets; i++) {
1277                 struct hlist_nulls_head *head = select_bucket(htab, i);
1278                 struct hlist_nulls_node *n;
1279                 struct htab_elem *l;
1280
1281                 hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
1282                         hlist_nulls_del_rcu(&l->hash_node);
1283                         htab_elem_free(htab, l);
1284                 }
1285         }
1286 }
1287
1288 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */
1289 static void htab_map_free(struct bpf_map *map)
1290 {
1291         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1292
1293         /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
1294          * so the programs (there can be more than one that used this map) were
1295          * disconnected from events. Wait for outstanding critical sections in
1296          * these programs to complete
1297          */
1298         synchronize_rcu();
1299
1300         /* some of free_htab_elem() callbacks for elements of this map may
1301          * not have executed. Wait for them.
1302          */
1303         rcu_barrier();
1304         if (!htab_is_prealloc(htab))
1305                 delete_all_elements(htab);
1306         else
1307                 prealloc_destroy(htab);
1308
1309         free_percpu(htab->extra_elems);
1310         bpf_map_area_free(htab->buckets);
1311         kfree(htab);
1312 }
1313
1314 static void htab_map_seq_show_elem(struct bpf_map *map, void *key,
1315                                    struct seq_file *m)
1316 {
1317         void *value;
1318
1319         rcu_read_lock();
1320
1321         value = htab_map_lookup_elem(map, key);
1322         if (!value) {
1323                 rcu_read_unlock();
1324                 return;
1325         }
1326
1327         btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
1328         seq_puts(m, ": ");
1329         btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
1330         seq_puts(m, "\n");
1331
1332         rcu_read_unlock();
1333 }
1334
1335 static int
1336 __htab_map_lookup_and_delete_batch(struct bpf_map *map,
1337                                    const union bpf_attr *attr,
1338                                    union bpf_attr __user *uattr,
1339                                    bool do_delete, bool is_lru_map,
1340                                    bool is_percpu)
1341 {
1342         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1343         u32 bucket_cnt, total, key_size, value_size, roundup_key_size;
1344         void *keys = NULL, *values = NULL, *value, *dst_key, *dst_val;
1345         void __user *uvalues = u64_to_user_ptr(attr->batch.values);
1346         void __user *ukeys = u64_to_user_ptr(attr->batch.keys);
1347         void *ubatch = u64_to_user_ptr(attr->batch.in_batch);
1348         u32 batch, max_count, size, bucket_size;
1349         struct htab_elem *node_to_free = NULL;
1350         u64 elem_map_flags, map_flags;
1351         struct hlist_nulls_head *head;
1352         struct hlist_nulls_node *n;
1353         unsigned long flags = 0;
1354         bool locked = false;
1355         struct htab_elem *l;
1356         struct bucket *b;
1357         int ret = 0;
1358
1359         elem_map_flags = attr->batch.elem_flags;
1360         if ((elem_map_flags & ~BPF_F_LOCK) ||
1361             ((elem_map_flags & BPF_F_LOCK) && !map_value_has_spin_lock(map)))
1362                 return -EINVAL;
1363
1364         map_flags = attr->batch.flags;
1365         if (map_flags)
1366                 return -EINVAL;
1367
1368         max_count = attr->batch.count;
1369         if (!max_count)
1370                 return 0;
1371
1372         if (put_user(0, &uattr->batch.count))
1373                 return -EFAULT;
1374
1375         batch = 0;
1376         if (ubatch && copy_from_user(&batch, ubatch, sizeof(batch)))
1377                 return -EFAULT;
1378
1379         if (batch >= htab->n_buckets)
1380                 return -ENOENT;
1381
1382         key_size = htab->map.key_size;
1383         roundup_key_size = round_up(htab->map.key_size, 8);
1384         value_size = htab->map.value_size;
1385         size = round_up(value_size, 8);
1386         if (is_percpu)
1387                 value_size = size * num_possible_cpus();
1388         total = 0;
1389         /* while experimenting with hash tables with sizes ranging from 10 to
1390          * 1000, it was observed that a bucket can have up to 5 entries.
1391          */
1392         bucket_size = 5;
1393
1394 alloc:
1395         /* We cannot do copy_from_user or copy_to_user inside
1396          * the RCU read-side critical section. Allocate enough space here.
1397          */
1398         keys = kvmalloc(key_size * bucket_size, GFP_USER | __GFP_NOWARN);
1399         values = kvmalloc(value_size * bucket_size, GFP_USER | __GFP_NOWARN);
1400         if (!keys || !values) {
1401                 ret = -ENOMEM;
1402                 goto after_loop;
1403         }
1404
1405 again:
1406         bpf_disable_instrumentation();
1407         rcu_read_lock();
1408 again_nocopy:
1409         dst_key = keys;
1410         dst_val = values;
1411         b = &htab->buckets[batch];
1412         head = &b->head;
1413         /* do not grab the lock unless we need it (bucket_cnt > 0). */
1414         if (locked)
1415                 flags = htab_lock_bucket(htab, b);
1416
1417         bucket_cnt = 0;
1418         hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
1419                 bucket_cnt++;
1420
1421         if (bucket_cnt && !locked) {
1422                 locked = true;
1423                 goto again_nocopy;
1424         }
1425
1426         if (bucket_cnt > (max_count - total)) {
1427                 if (total == 0)
1428                         ret = -ENOSPC;
1429                 /* Note that since bucket_cnt > 0 here, it is implicit
1430                  * that the lock was grabbed, so release it.
1431                  */
1432                 htab_unlock_bucket(htab, b, flags);
1433                 rcu_read_unlock();
1434                 bpf_enable_instrumentation();
1435                 goto after_loop;
1436         }
1437
1438         if (bucket_cnt > bucket_size) {
1439                 bucket_size = bucket_cnt;
1440                 /* Note that since bucket_cnt > 0 here, it is implicit
1441                  * that the lock was grabbed, so release it.
1442                  */
1443                 htab_unlock_bucket(htab, b, flags);
1444                 rcu_read_unlock();
1445                 bpf_enable_instrumentation();
1446                 kvfree(keys);
1447                 kvfree(values);
1448                 goto alloc;
1449         }
1450
1451         /* Next block is only safe to run if you have grabbed the lock */
1452         if (!locked)
1453                 goto next_batch;
1454
1455         hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
1456                 memcpy(dst_key, l->key, key_size);
1457
1458                 if (is_percpu) {
1459                         int off = 0, cpu;
1460                         void __percpu *pptr;
1461
1462                         pptr = htab_elem_get_ptr(l, map->key_size);
1463                         for_each_possible_cpu(cpu) {
1464                                 bpf_long_memcpy(dst_val + off,
1465                                                 per_cpu_ptr(pptr, cpu), size);
1466                                 off += size;
1467                         }
1468                 } else {
1469                         value = l->key + roundup_key_size;
1470                         if (elem_map_flags & BPF_F_LOCK)
1471                                 copy_map_value_locked(map, dst_val, value,
1472                                                       true);
1473                         else
1474                                 copy_map_value(map, dst_val, value);
1475                         check_and_init_map_lock(map, dst_val);
1476                 }
1477                 if (do_delete) {
1478                         hlist_nulls_del_rcu(&l->hash_node);
1479
1480                         /* bpf_lru_push_free() will acquire lru_lock, which
1481                          * may cause deadlock. See comments in function
1482                          * prealloc_lru_pop(). Let us do bpf_lru_push_free()
1483                          * after releasing the bucket lock.
1484                          */
1485                         if (is_lru_map) {
1486                                 l->batch_flink = node_to_free;
1487                                 node_to_free = l;
1488                         } else {
1489                                 free_htab_elem(htab, l);
1490                         }
1491                 }
1492                 dst_key += key_size;
1493                 dst_val += value_size;
1494         }
1495
1496         htab_unlock_bucket(htab, b, flags);
1497         locked = false;
1498
1499         while (node_to_free) {
1500                 l = node_to_free;
1501                 node_to_free = node_to_free->batch_flink;
1502                 bpf_lru_push_free(&htab->lru, &l->lru_node);
1503         }
1504
1505 next_batch:
1506         /* If we are not copying data, we can go to the next bucket and
1507          * avoid unlocking the RCU read lock.
1508          */
1509         if (!bucket_cnt && (batch + 1 < htab->n_buckets)) {
1510                 batch++;
1511                 goto again_nocopy;
1512         }
1513
1514         rcu_read_unlock();
1515         bpf_enable_instrumentation();
1516         if (bucket_cnt && (copy_to_user(ukeys + total * key_size, keys,
1517             key_size * bucket_cnt) ||
1518             copy_to_user(uvalues + total * value_size, values,
1519             value_size * bucket_cnt))) {
1520                 ret = -EFAULT;
1521                 goto after_loop;
1522         }
1523
1524         total += bucket_cnt;
1525         batch++;
1526         if (batch >= htab->n_buckets) {
1527                 ret = -ENOENT;
1528                 goto after_loop;
1529         }
1530         goto again;
1531
1532 after_loop:
1533         if (ret == -EFAULT)
1534                 goto out;
1535
1536         /* copy the number of entries and next batch index to user space */
1537         ubatch = u64_to_user_ptr(attr->batch.out_batch);
1538         if (copy_to_user(ubatch, &batch, sizeof(batch)) ||
1539             put_user(total, &uattr->batch.count))
1540                 ret = -EFAULT;
1541
1542 out:
1543         kvfree(keys);
1544         kvfree(values);
1545         return ret;
1546 }
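
/*
 * Illustrative userspace sketch (not part of this file) of driving the
 * batch interface above through libbpf's bpf_map_lookup_batch(): the first
 * call passes in_batch == NULL, every further call feeds the returned
 * out_batch back in, and the walk is complete once the kernel reports
 * ENOENT after scanning the last bucket. "map_fd", ENTRIES, KEY_SIZE and
 * VALUE_SIZE are assumptions made up for the example.
 *
 *	__u32 out_batch, count;
 *	void *in_batch = NULL;
 *	__u8 keys[ENTRIES * KEY_SIZE], values[ENTRIES * VALUE_SIZE];
 *	int err;
 *
 *	do {
 *		count = ENTRIES;
 *		err = bpf_map_lookup_batch(map_fd, in_batch, &out_batch,
 *					   keys, values, &count, NULL);
 *		// consume "count" key/value pairs from keys[]/values[]
 *		in_batch = &out_batch;
 *	} while (!err);
 */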
1547
1548 static int
1549 htab_percpu_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1550                              union bpf_attr __user *uattr)
1551 {
1552         return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1553                                                   false, true);
1554 }
1555
1556 static int
1557 htab_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
1558                                         const union bpf_attr *attr,
1559                                         union bpf_attr __user *uattr)
1560 {
1561         return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1562                                                   false, true);
1563 }
1564
1565 static int
1566 htab_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1567                       union bpf_attr __user *uattr)
1568 {
1569         return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1570                                                   false, false);
1571 }
1572
1573 static int
1574 htab_map_lookup_and_delete_batch(struct bpf_map *map,
1575                                  const union bpf_attr *attr,
1576                                  union bpf_attr __user *uattr)
1577 {
1578         return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1579                                                   false, false);
1580 }
1581
1582 static int
1583 htab_lru_percpu_map_lookup_batch(struct bpf_map *map,
1584                                  const union bpf_attr *attr,
1585                                  union bpf_attr __user *uattr)
1586 {
1587         return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1588                                                   true, true);
1589 }
1590
1591 static int
1592 htab_lru_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
1593                                             const union bpf_attr *attr,
1594                                             union bpf_attr __user *uattr)
1595 {
1596         return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1597                                                   true, true);
1598 }
1599
1600 static int
1601 htab_lru_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1602                           union bpf_attr __user *uattr)
1603 {
1604         return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1605                                                   true, false);
1606 }
1607
1608 static int
1609 htab_lru_map_lookup_and_delete_batch(struct bpf_map *map,
1610                                      const union bpf_attr *attr,
1611                                      union bpf_attr __user *uattr)
1612 {
1613         return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1614                                                   true, false);
1615 }
1616
1617 static int htab_map_btf_id;
1618 const struct bpf_map_ops htab_map_ops = {
1619         .map_alloc_check = htab_map_alloc_check,
1620         .map_alloc = htab_map_alloc,
1621         .map_free = htab_map_free,
1622         .map_get_next_key = htab_map_get_next_key,
1623         .map_lookup_elem = htab_map_lookup_elem,
1624         .map_update_elem = htab_map_update_elem,
1625         .map_delete_elem = htab_map_delete_elem,
1626         .map_gen_lookup = htab_map_gen_lookup,
1627         .map_seq_show_elem = htab_map_seq_show_elem,
1628         BATCH_OPS(htab),
1629         .map_btf_name = "bpf_htab",
1630         .map_btf_id = &htab_map_btf_id,
1631 };
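
/*
 * For reference, a minimal BPF-side declaration (a sketch in the
 * conventional libbpf BTF map syntax; the names are made up) of a map
 * that ends up backed by htab_map_ops above once its type resolves to
 * BPF_MAP_TYPE_HASH:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 1024);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} my_hash SEC(".maps");
 */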
1632
1633 static int htab_lru_map_btf_id;
1634 const struct bpf_map_ops htab_lru_map_ops = {
1635         .map_alloc_check = htab_map_alloc_check,
1636         .map_alloc = htab_map_alloc,
1637         .map_free = htab_map_free,
1638         .map_get_next_key = htab_map_get_next_key,
1639         .map_lookup_elem = htab_lru_map_lookup_elem,
1640         .map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
1641         .map_update_elem = htab_lru_map_update_elem,
1642         .map_delete_elem = htab_lru_map_delete_elem,
1643         .map_gen_lookup = htab_lru_map_gen_lookup,
1644         .map_seq_show_elem = htab_map_seq_show_elem,
1645         BATCH_OPS(htab_lru),
1646         .map_btf_name = "bpf_htab",
1647         .map_btf_id = &htab_lru_map_btf_id,
1648 };
1649
1650 /* Called from eBPF program */
1651 static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
1652 {
1653         struct htab_elem *l = __htab_map_lookup_elem(map, key);
1654
1655         if (l)
1656                 return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
1657         else
1658                 return NULL;
1659 }
1660
1661 static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
1662 {
1663         struct htab_elem *l = __htab_map_lookup_elem(map, key);
1664
1665         if (l) {
1666                 bpf_lru_node_set_ref(&l->lru_node);
1667                 return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
1668         }
1669
1670         return NULL;
1671 }
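
/*
 * From a BPF program's point of view a lookup in a (LRU) percpu hash only
 * yields the current CPU's copy of the value, as returned by
 * this_cpu_ptr() above. A hedged sketch ("my_percpu_hash" and the value
 * type are assumptions):
 *
 *	__u64 *val = bpf_map_lookup_elem(&my_percpu_hash, &key);
 *	if (val)
 *		__sync_fetch_and_add(val, 1);	// touches this CPU's slot only
 */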
1672
1673 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
1674 {
1675         struct htab_elem *l;
1676         void __percpu *pptr;
1677         int ret = -ENOENT;
1678         int cpu, off = 0;
1679         u32 size;
1680
1681         /* per_cpu areas are zero-filled and BPF programs can only
1682          * access 'value_size' bytes of them, so copying the rounded-up
1683          * areas will not leak any kernel data.
1684          */
1685         size = round_up(map->value_size, 8);
1686         rcu_read_lock();
1687         l = __htab_map_lookup_elem(map, key);
1688         if (!l)
1689                 goto out;
1690         /* We do not mark the LRU map element here in order not to mess
1691          * up the eviction heuristics when user space does a map walk.
1692          */
1693         pptr = htab_elem_get_ptr(l, map->key_size);
1694         for_each_possible_cpu(cpu) {
1695                 bpf_long_memcpy(value + off,
1696                                 per_cpu_ptr(pptr, cpu), size);
1697                 off += size;
1698         }
1699         ret = 0;
1700 out:
1701         rcu_read_unlock();
1702         return ret;
1703 }
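
/*
 * The syscall-side counterpart of the copy loop above: user space must
 * supply one round_up(value_size, 8) slot per possible CPU. A minimal
 * libbpf sketch for an 8-byte value ("map_fd" and "key" are assumptions):
 *
 *	int cpu, ncpus = libbpf_num_possible_cpus();
 *	__u64 total = 0, *values = calloc(ncpus, sizeof(__u64));
 *
 *	if (!bpf_map_lookup_elem(map_fd, &key, values))
 *		for (cpu = 0; cpu < ncpus; cpu++)
 *			total += values[cpu];
 */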
1704
1705 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
1706                            u64 map_flags)
1707 {
1708         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1709         int ret;
1710
1711         rcu_read_lock();
1712         if (htab_is_lru(htab))
1713                 ret = __htab_lru_percpu_map_update_elem(map, key, value,
1714                                                         map_flags, true);
1715         else
1716                 ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
1717                                                     true);
1718         rcu_read_unlock();
1719
1720         return ret;
1721 }
1722
1723 static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
1724                                           struct seq_file *m)
1725 {
1726         struct htab_elem *l;
1727         void __percpu *pptr;
1728         int cpu;
1729
1730         rcu_read_lock();
1731
1732         l = __htab_map_lookup_elem(map, key);
1733         if (!l) {
1734                 rcu_read_unlock();
1735                 return;
1736         }
1737
1738         btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
1739         seq_puts(m, ": {\n");
1740         pptr = htab_elem_get_ptr(l, map->key_size);
1741         for_each_possible_cpu(cpu) {
1742                 seq_printf(m, "\tcpu%d: ", cpu);
1743                 btf_type_seq_show(map->btf, map->btf_value_type_id,
1744                                   per_cpu_ptr(pptr, cpu), m);
1745                 seq_puts(m, "\n");
1746         }
1747         seq_puts(m, "}\n");
1748
1749         rcu_read_unlock();
1750 }
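
/*
 * When such a map is pinned in bpffs and the pinned file is read, the
 * function above renders each element roughly as follows (the shape comes
 * from the seq_puts()/seq_printf() calls; the key and values are made up):
 *
 *	1: {
 *		cpu0: 5
 *		cpu1: 0
 *	}
 */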
1751
1752 static int htab_percpu_map_btf_id;
1753 const struct bpf_map_ops htab_percpu_map_ops = {
1754         .map_alloc_check = htab_map_alloc_check,
1755         .map_alloc = htab_map_alloc,
1756         .map_free = htab_map_free,
1757         .map_get_next_key = htab_map_get_next_key,
1758         .map_lookup_elem = htab_percpu_map_lookup_elem,
1759         .map_update_elem = htab_percpu_map_update_elem,
1760         .map_delete_elem = htab_map_delete_elem,
1761         .map_seq_show_elem = htab_percpu_map_seq_show_elem,
1762         BATCH_OPS(htab_percpu),
1763         .map_btf_name = "bpf_htab",
1764         .map_btf_id = &htab_percpu_map_btf_id,
1765 };
1766
1767 static int htab_lru_percpu_map_btf_id;
1768 const struct bpf_map_ops htab_lru_percpu_map_ops = {
1769         .map_alloc_check = htab_map_alloc_check,
1770         .map_alloc = htab_map_alloc,
1771         .map_free = htab_map_free,
1772         .map_get_next_key = htab_map_get_next_key,
1773         .map_lookup_elem = htab_lru_percpu_map_lookup_elem,
1774         .map_update_elem = htab_lru_percpu_map_update_elem,
1775         .map_delete_elem = htab_lru_map_delete_elem,
1776         .map_seq_show_elem = htab_percpu_map_seq_show_elem,
1777         BATCH_OPS(htab_lru_percpu),
1778         .map_btf_name = "bpf_htab",
1779         .map_btf_id = &htab_lru_percpu_map_btf_id,
1780 };
1781
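/* fd-based hash maps take a u32 file descriptor as the update value from
 * user space and store the resolved pointer internally, hence the
 * value_size check below.
 */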
1782 static int fd_htab_map_alloc_check(union bpf_attr *attr)
1783 {
1784         if (attr->value_size != sizeof(u32))
1785                 return -EINVAL;
1786         return htab_map_alloc_check(attr);
1787 }
1788
1789 static void fd_htab_map_free(struct bpf_map *map)
1790 {
1791         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1792         struct hlist_nulls_node *n;
1793         struct hlist_nulls_head *head;
1794         struct htab_elem *l;
1795         int i;
1796
1797         for (i = 0; i < htab->n_buckets; i++) {
1798                 head = select_bucket(htab, i);
1799
1800                 hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
1801                         void *ptr = fd_htab_map_get_ptr(map, l);
1802
1803                         map->ops->map_fd_put_ptr(ptr);
1804                 }
1805         }
1806
1807         htab_map_free(map);
1808 }
1809
1810 /* only called from syscall */
1811 int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
1812 {
1813         void **ptr;
1814         int ret = 0;
1815
1816         if (!map->ops->map_fd_sys_lookup_elem)
1817                 return -ENOTSUPP;
1818
1819         rcu_read_lock();
1820         ptr = htab_map_lookup_elem(map, key);
1821         if (ptr)
1822                 *value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr));
1823         else
1824                 ret = -ENOENT;
1825         rcu_read_unlock();
1826
1827         return ret;
1828 }
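
/*
 * Note that for hash-of-maps the ->map_fd_sys_lookup_elem() hook used
 * above (bpf_map_fd_sys_lookup_elem()) reports the inner map's ID rather
 * than a new file descriptor; user space can convert the ID back into an
 * fd, e.g. (a sketch, "outer_fd" is an assumption):
 *
 *	__u32 key = 0, inner_id;
 *
 *	if (!bpf_map_lookup_elem(outer_fd, &key, &inner_id))
 *		inner_fd = bpf_map_get_fd_by_id(inner_id);
 */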
1829
1830 /* only called from syscall */
1831 int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
1832                                 void *key, void *value, u64 map_flags)
1833 {
1834         void *ptr;
1835         int ret;
1836         u32 ufd = *(u32 *)value;
1837
1838         ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
1839         if (IS_ERR(ptr))
1840                 return PTR_ERR(ptr);
1841
1842         ret = htab_map_update_elem(map, key, &ptr, map_flags);
1843         if (ret)
1844                 map->ops->map_fd_put_ptr(ptr);
1845
1846         return ret;
1847 }
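
/*
 * The update direction works the same way in reverse: user space passes
 * the inner map's file descriptor as the u32 value, which
 * map_fd_get_ptr() above resolves into the pointer that actually gets
 * stored. A sketch ("outer_fd" and "inner_fd" are assumptions):
 *
 *	__u32 key = 0;
 *	__u32 value = inner_fd;	// fd of a previously created inner map
 *
 *	err = bpf_map_update_elem(outer_fd, &key, &value, BPF_ANY);
 */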
1848
1849 static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
1850 {
1851         struct bpf_map *map, *inner_map_meta;
1852
1853         inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
1854         if (IS_ERR(inner_map_meta))
1855                 return inner_map_meta;
1856
1857         map = htab_map_alloc(attr);
1858         if (IS_ERR(map)) {
1859                 bpf_map_meta_free(inner_map_meta);
1860                 return map;
1861         }
1862
1863         map->inner_map_meta = inner_map_meta;
1864
1865         return map;
1866 }
1867
1868 static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
1869 {
1870         struct bpf_map **inner_map = htab_map_lookup_elem(map, key);
1871
1872         if (!inner_map)
1873                 return NULL;
1874
1875         return READ_ONCE(*inner_map);
1876 }
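
/*
 * Inside a BPF program a hash-of-maps lookup is therefore a two-step
 * affair: the first lookup returns the inner map, the second indexes into
 * it. A sketch (the map and key names are assumptions):
 *
 *	void *inner = bpf_map_lookup_elem(&outer_hash, &outer_key);
 *
 *	if (inner)
 *		value = bpf_map_lookup_elem(inner, &inner_key);
 */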
1877
1878 static u32 htab_of_map_gen_lookup(struct bpf_map *map,
1879                                   struct bpf_insn *insn_buf)
1880 {
1881         struct bpf_insn *insn = insn_buf;
1882         const int ret = BPF_REG_0;
1883
1884         BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
1885                      (void *(*)(struct bpf_map *map, void *key))NULL));
1886         *insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
1887         *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
1888         *insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
1889                                 offsetof(struct htab_elem, key) +
1890                                 round_up(map->key_size, 8));
1891         *insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
1892
1893         return insn - insn_buf;
1894 }
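
/*
 * The instruction sequence emitted above inlines the lookup: call
 * __htab_map_lookup_elem(), skip the next two instructions if it returned
 * NULL, advance the returned pointer past the key to the 8-byte aligned
 * value area, and load the inner map pointer stored there.
 */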
1895
1896 static void htab_of_map_free(struct bpf_map *map)
1897 {
1898         bpf_map_meta_free(map->inner_map_meta);
1899         fd_htab_map_free(map);
1900 }
1901
1902 static int htab_of_maps_map_btf_id;
1903 const struct bpf_map_ops htab_of_maps_map_ops = {
1904         .map_alloc_check = fd_htab_map_alloc_check,
1905         .map_alloc = htab_of_map_alloc,
1906         .map_free = htab_of_map_free,
1907         .map_get_next_key = htab_map_get_next_key,
1908         .map_lookup_elem = htab_of_map_lookup_elem,
1909         .map_delete_elem = htab_map_delete_elem,
1910         .map_fd_get_ptr = bpf_map_fd_get_ptr,
1911         .map_fd_put_ptr = bpf_map_fd_put_ptr,
1912         .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
1913         .map_gen_lookup = htab_of_map_gen_lookup,
1914         .map_check_btf = map_check_no_btf,
1915         .map_btf_name = "bpf_htab",
1916         .map_btf_id = &htab_of_maps_map_btf_id,
1917 };