[linux-2.6-microblaze.git] kernel/bpf/devmap.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 */

/* A devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
 * spent some effort to ensure the datapath with redirect maps does not use
 * any locking. This is a quick note on the details.
 *
 * We have three possible paths to get into the devmap control plane:
 * bpf syscalls, bpf programs, and driver side xmit/flush operations. A bpf
 * syscall will invoke an update, delete, or lookup operation. To ensure
 * updates and deletes appear atomic from the datapath side, xchg() is used
 * to modify the netdev_map array. Then, because the datapath does a lookup
 * into the netdev_map array (read-only) from an RCU critical section, we use
 * call_rcu() to wait for an RCU grace period before freeing the old data
 * structures. This ensures the datapath always has a valid copy. However,
 * the datapath does a "flush" operation that pushes any pending packets in
 * the driver outside the RCU critical section. Each bpf_dtab_netdev tracks
 * these pending operations using a per-cpu flush list. The bpf_dtab_netdev
 * object will not be destroyed until this list is empty, indicating
 * outstanding flush operations have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search for net devices in the map structure that
 * contain a reference to the net device and remove them. This is a two step
 * process: (a) dereference the bpf_dtab_netdev object in netdev_map and (b)
 * check to see if the ifindex is the same as the net_device being removed.
 * When removing the dev, a cmpxchg() is used to ensure the correct dev is
 * removed; in the case of a concurrent update or delete operation it is
 * possible that the initially referenced dev is no longer in the map. As the
 * notifier hook walks the map we know that new dev references cannot be
 * added by the user because core infrastructure ensures dev_get_by_index()
 * calls will fail at this point.
 *
 * The devmap_hash type is a map type which interprets keys as ifindexes and
 * indexes these using a hashmap. This allows maps that use ifindex as key to
 * be densely packed instead of having holes in the lookup array for unused
 * ifindexes. The setup and packet enqueue/send code is shared between the
 * two types of devmap; only the lookup and insertion is different.
 */
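/* For orientation, a minimal sketch of how a devmap is typically driven from
 * the BPF program side (illustrative only; the map name "tx_port", its size
 * and the key passed to bpf_redirect_map() are hypothetical):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(struct bpf_devmap_val));
 *		__uint(max_entries, 64);
 *	} tx_port SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_redirect_prog(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&tx_port, 0, 0);
 *	}
 *
 * The lookup behind bpf_redirect_map() lands in __dev_map_lookup_elem() or
 * __dev_map_hash_lookup_elem() below; the frame is then queued with
 * bq_enqueue() and transmitted when the driver calls xdp_do_flush().
 */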
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>

#define DEV_CREATE_FLAG_MASK \
        (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

struct xdp_dev_bulk_queue {
        struct xdp_frame *q[DEV_MAP_BULK_SIZE];
        struct list_head flush_node;
        struct net_device *dev;
        struct net_device *dev_rx;
        struct bpf_prog *xdp_prog;
        unsigned int count;
};

struct bpf_dtab_netdev {
        struct net_device *dev; /* must be first member, due to tracepoint */
        struct hlist_node index_hlist;
        struct bpf_dtab *dtab;
        struct bpf_prog *xdp_prog;
        struct rcu_head rcu;
        unsigned int idx;
        struct bpf_devmap_val val;
};

struct bpf_dtab {
        struct bpf_map map;
        struct bpf_dtab_netdev __rcu **netdev_map; /* DEVMAP type only */
        struct list_head list;

        /* these are only used for DEVMAP_HASH type maps */
        struct hlist_head *dev_index_head;
        spinlock_t index_lock;
        unsigned int items;
        u32 n_buckets;
};

static DEFINE_PER_CPU(struct list_head, dev_flush_list);
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static struct hlist_head *dev_map_create_hash(unsigned int entries,
                                              int numa_node)
{
        int i;
        struct hlist_head *hash;

        hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
        if (hash != NULL)
                for (i = 0; i < entries; i++)
                        INIT_HLIST_HEAD(&hash[i]);

        return hash;
}

static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
                                                    int idx)
{
        return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
}

static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
        u32 valsize = attr->value_size;

        /* check sanity of attributes. 2 value sizes supported:
         * 4 bytes: ifindex
         * 8 bytes: ifindex + prog fd
         */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
            (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
             valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
            attr->map_flags & ~DEV_CREATE_FLAG_MASK)
                return -EINVAL;

        /* Lookup returns a pointer straight to dev->ifindex, so make sure the
         * verifier prevents writes from the BPF side
         */
        attr->map_flags |= BPF_F_RDONLY_PROG;

        bpf_map_init_from_attr(&dtab->map, attr);

        if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
                dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);

                if (!dtab->n_buckets) /* Overflow check */
                        return -EINVAL;

                dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
                                                           dtab->map.numa_node);
                if (!dtab->dev_index_head)
                        return -ENOMEM;

                spin_lock_init(&dtab->index_lock);
        } else {
                dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
                                                      sizeof(struct bpf_dtab_netdev *),
                                                      dtab->map.numa_node);
                if (!dtab->netdev_map)
                        return -ENOMEM;
        }

        return 0;
}

static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
        struct bpf_dtab *dtab;
        int err;

        if (!capable(CAP_NET_ADMIN))
                return ERR_PTR(-EPERM);

        dtab = kzalloc(sizeof(*dtab), GFP_USER | __GFP_ACCOUNT);
        if (!dtab)
                return ERR_PTR(-ENOMEM);

        err = dev_map_init_map(dtab, attr);
        if (err) {
                kfree(dtab);
                return ERR_PTR(err);
        }

        spin_lock(&dev_map_lock);
        list_add_tail_rcu(&dtab->list, &dev_map_list);
        spin_unlock(&dev_map_lock);

        return &dtab->map;
}

static void dev_map_free(struct bpf_map *map)
{
        struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
        int i;

        /* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
         * so the programs (there can be more than one that used this map)
         * were disconnected from events. The following synchronize_rcu()
         * guarantees that both rcu read critical sections complete and that
         * preempt-disable regions (NAPI being the relevant context here) have
         * finished, so we are certain there will be no further reads against
         * the netdev_map and all flush operations are complete. Flush
         * operations can only be done from NAPI context for this reason.
         */

        spin_lock(&dev_map_lock);
        list_del_rcu(&dtab->list);
        spin_unlock(&dev_map_lock);

        bpf_clear_redirect_map(map);
        synchronize_rcu();

        /* Make sure prior calls to __dev_map_entry_free() have completed. */
        rcu_barrier();

        if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
                for (i = 0; i < dtab->n_buckets; i++) {
                        struct bpf_dtab_netdev *dev;
                        struct hlist_head *head;
                        struct hlist_node *next;

                        head = dev_map_index_hash(dtab, i);

                        hlist_for_each_entry_safe(dev, next, head, index_hlist) {
                                hlist_del_rcu(&dev->index_hlist);
                                if (dev->xdp_prog)
                                        bpf_prog_put(dev->xdp_prog);
                                dev_put(dev->dev);
                                kfree(dev);
                        }
                }

                bpf_map_area_free(dtab->dev_index_head);
        } else {
                for (i = 0; i < dtab->map.max_entries; i++) {
                        struct bpf_dtab_netdev *dev;

                        dev = rcu_dereference_raw(dtab->netdev_map[i]);
                        if (!dev)
                                continue;

                        if (dev->xdp_prog)
                                bpf_prog_put(dev->xdp_prog);
                        dev_put(dev->dev);
                        kfree(dev);
                }

                bpf_map_area_free(dtab->netdev_map);
        }

        kfree(dtab);
}

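/* Iteration helper for the DEVMAP array type: a missing or out-of-range key
 * restarts iteration at index 0, and -ENOENT is returned once the last index
 * has been handed out.
 */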
static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
        struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
        u32 index = key ? *(u32 *)key : U32_MAX;
        u32 *next = next_key;

        if (index >= dtab->map.max_entries) {
                *next = 0;
                return 0;
        }

        if (index == dtab->map.max_entries - 1)
                return -ENOENT;
        *next = index + 1;
        return 0;
}

/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 * by local_bh_disable() (from XDP calls inside NAPI). The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
        struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
        struct hlist_head *head = dev_map_index_hash(dtab, key);
        struct bpf_dtab_netdev *dev;

        hlist_for_each_entry_rcu(dev, head, index_hlist,
                                 lockdep_is_held(&dtab->index_lock))
                if (dev->idx == key)
                        return dev;

        return NULL;
}

static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
                                    void *next_key)
{
        struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
        u32 idx, *next = next_key;
        struct bpf_dtab_netdev *dev, *next_dev;
        struct hlist_head *head;
        int i = 0;

        if (!key)
                goto find_first;

        idx = *(u32 *)key;

        dev = __dev_map_hash_lookup_elem(map, idx);
        if (!dev)
                goto find_first;

        next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
                                    struct bpf_dtab_netdev, index_hlist);

        if (next_dev) {
                *next = next_dev->idx;
                return 0;
        }

        i = idx & (dtab->n_buckets - 1);
        i++;

 find_first:
        for (; i < dtab->n_buckets; i++) {
                head = dev_map_index_hash(dtab, i);

                next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
                                            struct bpf_dtab_netdev,
                                            index_hlist);
                if (next_dev) {
                        *next = next_dev->idx;
                        return 0;
                }
        }

        return -ENOENT;
}

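/* True if the map's value layout includes the bpf_prog.fd field, i.e. the
 * map was created with 8-byte values and its entries may carry a per-entry
 * XDP program.
 */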
bool dev_map_can_have_prog(struct bpf_map *map)
{
        if ((map->map_type == BPF_MAP_TYPE_DEVMAP ||
             map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) &&
            map->value_size != offsetofend(struct bpf_devmap_val, ifindex))
                return true;

        return false;
}

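/* Run the devmap-attached XDP program over a batch of frames. Frames that
 * return XDP_PASS are compacted to the front of @frames; any other verdict
 * (including XDP_TX/XDP_REDIRECT, which are not supported here) drops the
 * frame. Returns the number of frames left to transmit.
 */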
static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
                                struct xdp_frame **frames, int n,
                                struct net_device *dev)
{
        struct xdp_txq_info txq = { .dev = dev };
        struct xdp_buff xdp;
        int i, nframes = 0;

        for (i = 0; i < n; i++) {
                struct xdp_frame *xdpf = frames[i];
                u32 act;
                int err;

                xdp_convert_frame_to_buff(xdpf, &xdp);
                xdp.txq = &txq;

                act = bpf_prog_run_xdp(xdp_prog, &xdp);
                switch (act) {
                case XDP_PASS:
                        err = xdp_update_frame_from_buff(&xdp, xdpf);
                        if (unlikely(err < 0))
                                xdp_return_frame_rx_napi(xdpf);
                        else
                                frames[nframes++] = xdpf;
                        break;
                default:
                        bpf_warn_invalid_xdp_action(act);
                        fallthrough;
                case XDP_ABORTED:
                        trace_xdp_exception(dev, xdp_prog, act);
                        fallthrough;
                case XDP_DROP:
                        xdp_return_frame_rx_napi(xdpf);
                        break;
                }
        }
        return nframes; /* sent frames count */
}

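/* Flush one per-CPU bulk queue: optionally run the per-entry XDP program over
 * the queued frames, hand the survivors to the target device via
 * ndo_xdp_xmit(), and free any frames the driver did not accept.
 */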
static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
{
        struct net_device *dev = bq->dev;
        unsigned int cnt = bq->count;
        int sent = 0, err = 0;
        int to_send = cnt;
        int i;

        if (unlikely(!cnt))
                return;

        for (i = 0; i < cnt; i++) {
                struct xdp_frame *xdpf = bq->q[i];

                prefetch(xdpf);
        }

        if (bq->xdp_prog) {
                to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
                if (!to_send)
                        goto out;
        }

        sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);
        if (sent < 0) {
                /* If ndo_xdp_xmit fails with an errno, no frames have
                 * been xmit'ed.
                 */
                err = sent;
                sent = 0;
        }

        /* If not all frames have been transmitted, it is our
         * responsibility to free them
         */
        for (i = sent; unlikely(i < to_send); i++)
                xdp_return_frame_rx_napi(bq->q[i]);

out:
        bq->count = 0;
        trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err);
}

/* __dev_flush is called from xdp_do_flush() which _must_ be signalled from the
 * driver before returning from its napi->poll() routine. See the comment above
 * xdp_do_flush() in filter.c.
 */
void __dev_flush(void)
{
        struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
        struct xdp_dev_bulk_queue *bq, *tmp;

        list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
                bq_xmit_all(bq, XDP_XMIT_FLUSH);
                bq->dev_rx = NULL;
                bq->xdp_prog = NULL;
                __list_del_clearprev(&bq->flush_node);
        }
}

/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 * by local_bh_disable() (from XDP calls inside NAPI). The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
        struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
        struct bpf_dtab_netdev *obj;

        if (key >= map->max_entries)
                return NULL;

        obj = rcu_dereference_check(dtab->netdev_map[key],
                                    rcu_read_lock_bh_held());
        return obj;
}

/* Runs in NAPI, i.e., softirq under local_bh_disable(). Thus, safe percpu
 * variable access, and map elements stick around. See comment above
 * xdp_do_flush() in filter.c.
 */
static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
                       struct net_device *dev_rx, struct bpf_prog *xdp_prog)
{
        struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
        struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);

        if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
                bq_xmit_all(bq, 0);

        /* Ingress dev_rx will be the same for all xdp_frames in the
         * bulk_queue, because the bq is stored per-CPU and must be flushed
         * from the net_device driver's NAPI func end.
         *
         * Do the same with xdp_prog and flush_list since these fields
         * are only ever modified together.
         */
        if (!bq->dev_rx) {
                bq->dev_rx = dev_rx;
                bq->xdp_prog = xdp_prog;
                list_add(&bq->flush_node, flush_list);
        }

        bq->q[bq->count++] = xdpf;
}

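/* Common enqueue path: check that the target device can transmit via
 * ndo_xdp_xmit() and can take a frame of this size, convert the xdp_buff
 * into an xdp_frame and park it on the target device's per-CPU bulk queue.
 */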
static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
                                struct net_device *dev_rx,
                                struct bpf_prog *xdp_prog)
{
        struct xdp_frame *xdpf;
        int err;

        if (!dev->netdev_ops->ndo_xdp_xmit)
                return -EOPNOTSUPP;

        err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
        if (unlikely(err))
                return err;

        xdpf = xdp_convert_buff_to_frame(xdp);
        if (unlikely(!xdpf))
                return -EOVERFLOW;

        bq_enqueue(dev, xdpf, dev_rx, xdp_prog);
        return 0;
}

int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
                    struct net_device *dev_rx)
{
        return __xdp_enqueue(dev, xdp, dev_rx, NULL);
}

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
                    struct net_device *dev_rx)
{
        struct net_device *dev = dst->dev;

        return __xdp_enqueue(dev, xdp, dev_rx, dst->xdp_prog);
}

static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_buff *xdp,
                         int exclude_ifindex)
{
        if (!obj || obj->dev->ifindex == exclude_ifindex ||
            !obj->dev->netdev_ops->ndo_xdp_xmit)
                return false;

        if (xdp_ok_fwd_dev(obj->dev, xdp->data_end - xdp->data))
                return false;

        return true;
}

static int dev_map_enqueue_clone(struct bpf_dtab_netdev *obj,
                                 struct net_device *dev_rx,
                                 struct xdp_frame *xdpf)
{
        struct xdp_frame *nxdpf;

        nxdpf = xdpf_clone(xdpf);
        if (!nxdpf)
                return -ENOMEM;

        bq_enqueue(obj->dev, nxdpf, dev_rx, obj->xdp_prog);

        return 0;
}

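/* Broadcast path used with the BPF_F_BROADCAST flag: enqueue a clone of the
 * frame to every valid destination in the map (optionally skipping the
 * ingress device), letting the last destination consume the original frame
 * so only n-1 clones are needed.
 */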
int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
                          struct bpf_map *map, bool exclude_ingress)
{
        struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
        int exclude_ifindex = exclude_ingress ? dev_rx->ifindex : 0;
        struct bpf_dtab_netdev *dst, *last_dst = NULL;
        struct hlist_head *head;
        struct xdp_frame *xdpf;
        unsigned int i;
        int err;

        xdpf = xdp_convert_buff_to_frame(xdp);
        if (unlikely(!xdpf))
                return -EOVERFLOW;

        if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
                for (i = 0; i < map->max_entries; i++) {
                        dst = rcu_dereference_check(dtab->netdev_map[i],
                                                    rcu_read_lock_bh_held());
                        if (!is_valid_dst(dst, xdp, exclude_ifindex))
                                continue;

                        /* we only need n-1 clones; last_dst enqueued below */
                        if (!last_dst) {
                                last_dst = dst;
                                continue;
                        }

                        err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
                        if (err)
                                return err;

                        last_dst = dst;
                }
        } else { /* BPF_MAP_TYPE_DEVMAP_HASH */
                for (i = 0; i < dtab->n_buckets; i++) {
                        head = dev_map_index_hash(dtab, i);
                        hlist_for_each_entry_rcu(dst, head, index_hlist,
                                                 lockdep_is_held(&dtab->index_lock)) {
                                if (!is_valid_dst(dst, xdp, exclude_ifindex))
                                        continue;

                                /* we only need n-1 clones; last_dst enqueued below */
                                if (!last_dst) {
                                        last_dst = dst;
                                        continue;
                                }

                                err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
                                if (err)
                                        return err;

                                last_dst = dst;
                        }
                }
        }

        /* consume the last copy of the frame */
        if (last_dst)
                bq_enqueue(last_dst->dev, xdpf, dev_rx, last_dst->xdp_prog);
        else
                xdp_return_frame_rx_napi(xdpf); /* dtab is empty */

        return 0;
}

int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
                             struct bpf_prog *xdp_prog)
{
        int err;

        err = xdp_ok_fwd_dev(dst->dev, skb->len);
        if (unlikely(err))
                return err;
        skb->dev = dst->dev;
        generic_xdp_tx(skb, xdp_prog);

        return 0;
}

static int dev_map_redirect_clone(struct bpf_dtab_netdev *dst,
                                  struct sk_buff *skb,
                                  struct bpf_prog *xdp_prog)
{
        struct sk_buff *nskb;
        int err;

        nskb = skb_clone(skb, GFP_ATOMIC);
        if (!nskb)
                return -ENOMEM;

        err = dev_map_generic_redirect(dst, nskb, xdp_prog);
        if (unlikely(err)) {
                consume_skb(nskb);
                return err;
        }

        return 0;
}

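/* skb counterpart of dev_map_enqueue_multi() for generic XDP: clone the skb
 * to every matching destination and let the last one consume the original.
 */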
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
                           struct bpf_prog *xdp_prog, struct bpf_map *map,
                           bool exclude_ingress)
{
        struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
        int exclude_ifindex = exclude_ingress ? dev->ifindex : 0;
        struct bpf_dtab_netdev *dst, *last_dst = NULL;
        struct hlist_head *head;
        struct hlist_node *next;
        unsigned int i;
        int err;

        if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
                for (i = 0; i < map->max_entries; i++) {
                        dst = rcu_dereference_check(dtab->netdev_map[i],
                                                    rcu_read_lock_bh_held());
                        if (!dst || dst->dev->ifindex == exclude_ifindex)
                                continue;

                        /* we only need n-1 clones; last_dst enqueued below */
                        if (!last_dst) {
                                last_dst = dst;
                                continue;
                        }

                        err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
                        if (err)
                                return err;

                        last_dst = dst;
                }
        } else { /* BPF_MAP_TYPE_DEVMAP_HASH */
                for (i = 0; i < dtab->n_buckets; i++) {
                        head = dev_map_index_hash(dtab, i);
                        hlist_for_each_entry_safe(dst, next, head, index_hlist) {
                                if (!dst || dst->dev->ifindex == exclude_ifindex)
                                        continue;

                                /* we only need n-1 clones; last_dst enqueued below */
                                if (!last_dst) {
                                        last_dst = dst;
                                        continue;
                                }

                                err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
                                if (err)
                                        return err;

                                last_dst = dst;
                        }
                }
        }

        /* consume the first skb and return */
        if (last_dst)
                return dev_map_generic_redirect(last_dst, skb, xdp_prog);

        /* dtab is empty */
        consume_skb(skb);
        return 0;
}

static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);

        return obj ? &obj->val : NULL;
}

static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
                                                                *(u32 *)key);
        return obj ? &obj->val : NULL;
}

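/* RCU callback releasing an entry once readers are done: drop the optional
 * per-entry program reference and the net_device reference, then free it.
 */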
static void __dev_map_entry_free(struct rcu_head *rcu)
{
        struct bpf_dtab_netdev *dev;

        dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
        if (dev->xdp_prog)
                bpf_prog_put(dev->xdp_prog);
        dev_put(dev->dev);
        kfree(dev);
}

static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
        struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
        struct bpf_dtab_netdev *old_dev;
        int k = *(u32 *)key;

        if (k >= map->max_entries)
                return -EINVAL;

        old_dev = unrcu_pointer(xchg(&dtab->netdev_map[k], NULL));
        if (old_dev)
                call_rcu(&old_dev->rcu, __dev_map_entry_free);
        return 0;
}

static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
{
        struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
        struct bpf_dtab_netdev *old_dev;
        int k = *(u32 *)key;
        unsigned long flags;
        int ret = -ENOENT;

        spin_lock_irqsave(&dtab->index_lock, flags);

        old_dev = __dev_map_hash_lookup_elem(map, k);
        if (old_dev) {
                dtab->items--;
                hlist_del_init_rcu(&old_dev->index_hlist);
                call_rcu(&old_dev->rcu, __dev_map_entry_free);
                ret = 0;
        }
        spin_unlock_irqrestore(&dtab->index_lock, flags);

        return ret;
}

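/* Allocate and initialize one map entry: take a reference on the target
 * net_device and, if a program fd was supplied, on an XDP program with
 * expected_attach_type BPF_XDP_DEVMAP.
 */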
static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
                                                    struct bpf_dtab *dtab,
                                                    struct bpf_devmap_val *val,
                                                    unsigned int idx)
{
        struct bpf_prog *prog = NULL;
        struct bpf_dtab_netdev *dev;

        dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
                                   GFP_ATOMIC | __GFP_NOWARN,
                                   dtab->map.numa_node);
        if (!dev)
                return ERR_PTR(-ENOMEM);

        dev->dev = dev_get_by_index(net, val->ifindex);
        if (!dev->dev)
                goto err_out;

        if (val->bpf_prog.fd > 0) {
                prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
                                             BPF_PROG_TYPE_XDP, false);
                if (IS_ERR(prog))
                        goto err_put_dev;
                if (prog->expected_attach_type != BPF_XDP_DEVMAP)
                        goto err_put_prog;
        }

        dev->idx = idx;
        dev->dtab = dtab;
        if (prog) {
                dev->xdp_prog = prog;
                dev->val.bpf_prog.id = prog->aux->id;
        } else {
                dev->xdp_prog = NULL;
                dev->val.bpf_prog.id = 0;
        }
        dev->val.ifindex = val->ifindex;

        return dev;
err_put_prog:
        bpf_prog_put(prog);
err_put_dev:
        dev_put(dev->dev);
err_out:
        kfree(dev);
        return ERR_PTR(-EINVAL);
}

static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
                                 void *key, void *value, u64 map_flags)
{
        struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
        struct bpf_dtab_netdev *dev, *old_dev;
        struct bpf_devmap_val val = {};
        u32 i = *(u32 *)key;

        if (unlikely(map_flags > BPF_EXIST))
                return -EINVAL;
        if (unlikely(i >= dtab->map.max_entries))
                return -E2BIG;
        if (unlikely(map_flags == BPF_NOEXIST))
                return -EEXIST;

        /* already verified value_size <= sizeof val */
        memcpy(&val, value, map->value_size);

        if (!val.ifindex) {
                dev = NULL;
                /* can not specify fd if ifindex is 0 */
                if (val.bpf_prog.fd > 0)
                        return -EINVAL;
        } else {
                dev = __dev_map_alloc_node(net, dtab, &val, i);
                if (IS_ERR(dev))
                        return PTR_ERR(dev);
        }

        /* Use call_rcu() here to ensure rcu critical sections have completed,
         * remembering that the driver side flush operation will happen before
         * the net device is removed.
         */
        old_dev = unrcu_pointer(xchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev)));
        if (old_dev)
                call_rcu(&old_dev->rcu, __dev_map_entry_free);

        return 0;
}

static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
                               u64 map_flags)
{
        return __dev_map_update_elem(current->nsproxy->net_ns,
                                     map, key, value, map_flags);
}

static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
                                     void *key, void *value, u64 map_flags)
{
        struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
        struct bpf_dtab_netdev *dev, *old_dev;
        struct bpf_devmap_val val = {};
        u32 idx = *(u32 *)key;
        unsigned long flags;
        int err = -EEXIST;

        /* already verified value_size <= sizeof val */
        memcpy(&val, value, map->value_size);

        if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
                return -EINVAL;

        spin_lock_irqsave(&dtab->index_lock, flags);

        old_dev = __dev_map_hash_lookup_elem(map, idx);
        if (old_dev && (map_flags & BPF_NOEXIST))
                goto out_err;

        dev = __dev_map_alloc_node(net, dtab, &val, idx);
        if (IS_ERR(dev)) {
                err = PTR_ERR(dev);
                goto out_err;
        }

        if (old_dev) {
                hlist_del_rcu(&old_dev->index_hlist);
        } else {
                if (dtab->items >= dtab->map.max_entries) {
                        spin_unlock_irqrestore(&dtab->index_lock, flags);
                        call_rcu(&dev->rcu, __dev_map_entry_free);
                        return -E2BIG;
                }
                dtab->items++;
        }

        hlist_add_head_rcu(&dev->index_hlist,
                           dev_map_index_hash(dtab, idx));
        spin_unlock_irqrestore(&dtab->index_lock, flags);

        if (old_dev)
                call_rcu(&old_dev->rcu, __dev_map_entry_free);

        return 0;

out_err:
        spin_unlock_irqrestore(&dtab->index_lock, flags);
        return err;
}

static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
                                   u64 map_flags)
{
        return __dev_map_hash_update_elem(current->nsproxy->net_ns,
                                         map, key, value, map_flags);
}

static int dev_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
{
        return __bpf_xdp_redirect_map(map, ifindex, flags,
                                      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
                                      __dev_map_lookup_elem);
}

static int dev_hash_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
{
        return __bpf_xdp_redirect_map(map, ifindex, flags,
                                      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
                                      __dev_map_hash_lookup_elem);
}

static int dev_map_btf_id;
const struct bpf_map_ops dev_map_ops = {
        .map_meta_equal = bpf_map_meta_equal,
        .map_alloc = dev_map_alloc,
        .map_free = dev_map_free,
        .map_get_next_key = dev_map_get_next_key,
        .map_lookup_elem = dev_map_lookup_elem,
        .map_update_elem = dev_map_update_elem,
        .map_delete_elem = dev_map_delete_elem,
        .map_check_btf = map_check_no_btf,
        .map_btf_name = "bpf_dtab",
        .map_btf_id = &dev_map_btf_id,
        .map_redirect = dev_map_redirect,
};

static int dev_map_hash_map_btf_id;
const struct bpf_map_ops dev_map_hash_ops = {
        .map_meta_equal = bpf_map_meta_equal,
        .map_alloc = dev_map_alloc,
        .map_free = dev_map_free,
        .map_get_next_key = dev_map_hash_get_next_key,
        .map_lookup_elem = dev_map_hash_lookup_elem,
        .map_update_elem = dev_map_hash_update_elem,
        .map_delete_elem = dev_map_hash_delete_elem,
        .map_check_btf = map_check_no_btf,
        .map_btf_name = "bpf_dtab",
        .map_btf_id = &dev_map_hash_map_btf_id,
        .map_redirect = dev_hash_map_redirect,
};

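/* Called from the NETDEV_UNREGISTER notifier: drop every entry of a
 * DEVMAP_HASH map that still points at the disappearing net_device.
 */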
static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
                                       struct net_device *netdev)
{
        unsigned long flags;
        u32 i;

        spin_lock_irqsave(&dtab->index_lock, flags);
        for (i = 0; i < dtab->n_buckets; i++) {
                struct bpf_dtab_netdev *dev;
                struct hlist_head *head;
                struct hlist_node *next;

                head = dev_map_index_hash(dtab, i);

                hlist_for_each_entry_safe(dev, next, head, index_hlist) {
                        if (netdev != dev->dev)
                                continue;

                        dtab->items--;
                        hlist_del_rcu(&dev->index_hlist);
                        call_rcu(&dev->rcu, __dev_map_entry_free);
                }
        }
        spin_unlock_irqrestore(&dtab->index_lock, flags);
}

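/* Netdev notifier: on NETDEV_REGISTER allocate the per-CPU bulk queues used
 * by the XDP_REDIRECT fast path; on NETDEV_UNREGISTER purge the device from
 * every devmap so no stale references remain.
 */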
static int dev_map_notification(struct notifier_block *notifier,
                                ulong event, void *ptr)
{
        struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
        struct bpf_dtab *dtab;
        int i, cpu;

        switch (event) {
        case NETDEV_REGISTER:
                if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
                        break;

                /* will be freed in free_netdev() */
                netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
                if (!netdev->xdp_bulkq)
                        return NOTIFY_BAD;

                for_each_possible_cpu(cpu)
                        per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
                break;
        case NETDEV_UNREGISTER:
                /* This rcu_read_lock/unlock pair is needed because
                 * dev_map_list is an RCU list AND to ensure a delete
                 * operation does not free a netdev_map entry while we
                 * are comparing it against the netdev being unregistered.
                 */
                rcu_read_lock();
                list_for_each_entry_rcu(dtab, &dev_map_list, list) {
                        if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
                                dev_map_hash_remove_netdev(dtab, netdev);
                                continue;
                        }

                        for (i = 0; i < dtab->map.max_entries; i++) {
                                struct bpf_dtab_netdev *dev, *odev;

                                dev = rcu_dereference(dtab->netdev_map[i]);
                                if (!dev || netdev != dev->dev)
                                        continue;
                                odev = unrcu_pointer(cmpxchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev), NULL));
                                if (dev == odev)
                                        call_rcu(&dev->rcu,
                                                 __dev_map_entry_free);
                        }
                }
                rcu_read_unlock();
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
        .notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
        int cpu;

        /* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
        BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
                     offsetof(struct _bpf_dtab_netdev, dev));
        register_netdevice_notifier(&dev_map_notifier);

        for_each_possible_cpu(cpu)
                INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
        return 0;
}

subsys_initcall(dev_map_init);