1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
5 /* A devmap's primary use is as a backend map for the XDP BPF helper call
6 * bpf_redirect_map(); an example program is sketched below the #include lines.
7 * Because XDP is mostly concerned with performance, we spent some effort to
8 * ensure the datapath with redirect maps avoids any locking. This is a quick note on the details.
10 * We have three possible paths into the devmap control plane: bpf
11 * syscalls, bpf programs, and driver-side xmit/flush operations. A bpf syscall
12 * will invoke an update, delete, or lookup operation. To ensure updates and
13 * deletes appear atomic from the datapath side, xchg() is used to modify the
14 * netdev_map array. Then, because the datapath does a lookup into the netdev_map
15 * array (read-only) from an RCU critical section, we use call_rcu() to wait for
16 * an RCU grace period before freeing the old data structures. This ensures the
17 * datapath always has a valid copy. However, the datapath does a "flush"
18 * operation that pushes any pending packets in the driver outside the RCU
19 * critical section. Each bpf_dtab_netdev tracks these pending operations using
20 * a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed until
21 * this list is empty, indicating that all outstanding flush operations have completed.
23 * BPF syscalls may race with BPF program calls on any of the update, delete,
24 * or lookup operations. As noted above, the xchg() operation also keeps the
25 * netdev_map consistent in this case. From the devmap side, BPF programs
26 * calling into these operations are no different from multiple user space
27 * threads making system calls.
29 * Finally, any of the above may race with a netdev_unregister notifier. The
30 * unregister notifier must search the map structure for entries that
31 * contain a reference to the net device and remove them. This is a two-step
32 * process: (a) dereference the bpf_dtab_netdev object in netdev_map and (b)
33 * check whether the ifindex is the same as that of the net_device being removed.
34 * When removing the dev, a cmpxchg() is used to ensure the correct dev is
35 * removed; in the case of a concurrent update or delete operation it is
36 * possible that the initially referenced dev is no longer in the map. As the
37 * notifier hook walks the map, we know that new dev references cannot be
38 * added by the user because core infrastructure ensures dev_get_by_index()
39 * calls will fail at this point.
41 * The devmap_hash type is a map type which interprets keys as ifindexes and
42 * indexes these using a hashmap. This allows maps that use ifindex as key to be
43 * densely packed instead of having holes in the lookup array for unused
44 * ifindexes. The setup and packet enqueue/send code is shared between the two
45 * types of devmap; only the lookup and insertion differ.
47 #include <linux/bpf.h>
49 #include <linux/filter.h>
50 #include <trace/events/xdp.h>
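/* Illustrative example, not part of this file: a minimal XDP program that
 * redirects packets through a DEVMAP via bpf_redirect_map(), as described in
 * the comment above. The map name, sizes and fixed key are arbitrary choices
 * for the sketch, and it assumes libbpf's bpf_helpers.h conventions.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));	/* 4-byte, ifindex-only values */
	__uint(max_entries, 64);
} tx_ports SEC(".maps");

SEC("xdp")
int xdp_redirect_devmap(struct xdp_md *ctx)
{
	__u32 key = 0;	/* slot populated from user space */

	/* Returns XDP_REDIRECT when the slot holds a valid device; the flags
	 * argument (here XDP_PASS) is returned if the lookup fails.
	 */
	return bpf_redirect_map(&tx_ports, key, XDP_PASS);
}

char _license[] SEC("license") = "GPL";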
52 #define DEV_CREATE_FLAG_MASK \
53 (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
55 struct xdp_dev_bulk_queue {
56 struct xdp_frame *q[DEV_MAP_BULK_SIZE];
57 struct list_head flush_node;
58 struct net_device *dev;
59 struct net_device *dev_rx;
60 struct bpf_prog *xdp_prog;
64 struct bpf_dtab_netdev {
65 struct net_device *dev; /* must be first member, due to tracepoint */
66 struct hlist_node index_hlist;
67 struct bpf_dtab *dtab;
68 struct bpf_prog *xdp_prog;
71 struct bpf_devmap_val val;
76 struct bpf_dtab_netdev __rcu **netdev_map; /* DEVMAP type only */
77 struct list_head list;
79 /* these are only used for DEVMAP_HASH type maps */
80 struct hlist_head *dev_index_head;
81 spinlock_t index_lock;
86 static DEFINE_PER_CPU(struct list_head, dev_flush_list);
87 static DEFINE_SPINLOCK(dev_map_lock);
88 static LIST_HEAD(dev_map_list);
90 static struct hlist_head *dev_map_create_hash(unsigned int entries,
94 struct hlist_head *hash;
96 hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
98 for (i = 0; i < entries; i++)
99 INIT_HLIST_HEAD(&hash[i]);
104 static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
107 return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
110 static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
112 u32 valsize = attr->value_size;
114 /* check sanity of attributes. 2 value sizes supported:
115 * 4 bytes: ifindex
116 * 8 bytes: ifindex + prog fd (a user-space sketch follows below)
118 if (attr->max_entries == 0 || attr->key_size != 4 ||
119 (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
120 valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
121 attr->map_flags & ~DEV_CREATE_FLAG_MASK)
124 /* Lookup returns a pointer straight to dev->ifindex, so make sure the
125 * verifier prevents writes from the BPF side
127 attr->map_flags |= BPF_F_RDONLY_PROG;
130 bpf_map_init_from_attr(&dtab->map, attr);
132 if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
133 dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
135 if (!dtab->n_buckets) /* Overflow check */
139 if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
140 dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
141 dtab->map.numa_node);
142 if (!dtab->dev_index_head)
145 spin_lock_init(&dtab->index_lock);
147 dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
148 sizeof(struct bpf_dtab_netdev *),
149 dtab->map.numa_node);
150 if (!dtab->netdev_map)
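/* Illustrative user-space sketch, not part of this file: populating one devmap
 * slot with the 8-byte value layout validated above, i.e. an ifindex plus the
 * fd of a BPF_XDP_DEVMAP program. Uses the libbpf syscall wrappers; map_fd,
 * prog_fd and ifindex are assumed to come from elsewhere.
 */
#include <linux/bpf.h>
#include <bpf/bpf.h>

static int add_devmap_entry(int map_fd, __u32 slot, __u32 ifindex, int prog_fd)
{
	struct bpf_devmap_val val = {
		.ifindex = ifindex,		/* egress device */
		.bpf_prog.fd = prog_fd,		/* optional second-stage XDP program */
	};

	/* For a map created with the 4-byte value_size, passing just &ifindex
	 * as the value would work as well.
	 */
	return bpf_map_update_elem(map_fd, &slot, &val, BPF_ANY);
}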
157 static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
159 struct bpf_dtab *dtab;
162 if (!capable(CAP_NET_ADMIN))
163 return ERR_PTR(-EPERM);
165 dtab = kzalloc(sizeof(*dtab), GFP_USER | __GFP_ACCOUNT);
167 return ERR_PTR(-ENOMEM);
169 err = dev_map_init_map(dtab, attr);
175 spin_lock(&dev_map_lock);
176 list_add_tail_rcu(&dtab->list, &dev_map_list);
177 spin_unlock(&dev_map_lock);
182 static void dev_map_free(struct bpf_map *map)
184 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
187 /* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
188 * so the programs (there can be more than one that used this map) were
189 * disconnected from events. The following synchronize_rcu() guarantees
190 * that both RCU read critical sections have completed and waits for
191 * preempt-disable regions (NAPI being the relevant context here), so we
192 * are certain there will be no further reads against the netdev_map and
193 * that all flush operations are complete. Flush operations can only be done
194 * from NAPI context for this reason.
197 spin_lock(&dev_map_lock);
198 list_del_rcu(&dtab->list);
199 spin_unlock(&dev_map_lock);
201 bpf_clear_redirect_map(map);
204 /* Make sure prior __dev_map_entry_free() have completed. */
207 if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
208 for (i = 0; i < dtab->n_buckets; i++) {
209 struct bpf_dtab_netdev *dev;
210 struct hlist_head *head;
211 struct hlist_node *next;
213 head = dev_map_index_hash(dtab, i);
215 hlist_for_each_entry_safe(dev, next, head, index_hlist) {
216 hlist_del_rcu(&dev->index_hlist);
218 bpf_prog_put(dev->xdp_prog);
224 bpf_map_area_free(dtab->dev_index_head);
226 for (i = 0; i < dtab->map.max_entries; i++) {
227 struct bpf_dtab_netdev *dev;
229 dev = rcu_dereference_raw(dtab->netdev_map[i]);
234 bpf_prog_put(dev->xdp_prog);
239 bpf_map_area_free(dtab->netdev_map);
245 static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
247 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
248 u32 index = key ? *(u32 *)key : U32_MAX;
249 u32 *next = next_key;
251 if (index >= dtab->map.max_entries) {
256 if (index == dtab->map.max_entries - 1)
262 /* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
263 * by local_bh_disable() (from XDP calls inside NAPI). The
264 * rcu_read_lock_bh_held() below makes lockdep accept both.
266 static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
268 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
269 struct hlist_head *head = dev_map_index_hash(dtab, key);
270 struct bpf_dtab_netdev *dev;
272 hlist_for_each_entry_rcu(dev, head, index_hlist,
273 lockdep_is_held(&dtab->index_lock))
280 static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
283 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
284 u32 idx, *next = next_key;
285 struct bpf_dtab_netdev *dev, *next_dev;
286 struct hlist_head *head;
294 dev = __dev_map_hash_lookup_elem(map, idx);
298 next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
299 struct bpf_dtab_netdev, index_hlist);
302 *next = next_dev->idx;
306 i = idx & (dtab->n_buckets - 1);
310 for (; i < dtab->n_buckets; i++) {
311 head = dev_map_index_hash(dtab, i);
313 next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
314 struct bpf_dtab_netdev,
317 *next = next_dev->idx;
325 bool dev_map_can_have_prog(struct bpf_map *map)
327 if ((map->map_type == BPF_MAP_TYPE_DEVMAP ||
328 map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) &&
329 map->value_size != offsetofend(struct bpf_devmap_val, ifindex))
335 static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
336 struct xdp_frame **frames, int n,
337 struct net_device *dev)
339 struct xdp_txq_info txq = { .dev = dev };
343 for (i = 0; i < n; i++) {
344 struct xdp_frame *xdpf = frames[i];
348 xdp_convert_frame_to_buff(xdpf, &xdp);
351 act = bpf_prog_run_xdp(xdp_prog, &xdp);
354 err = xdp_update_frame_from_buff(&xdp, xdpf);
355 if (unlikely(err < 0))
356 xdp_return_frame_rx_napi(xdpf);
358 frames[nframes++] = xdpf;
361 bpf_warn_invalid_xdp_action(act);
364 trace_xdp_exception(dev, xdp_prog, act);
367 xdp_return_frame_rx_napi(xdpf);
371 return nframes; /* sent frames count */
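/* Illustrative sketch, not part of this file: a second-stage program of the
 * kind dev_map_bpf_prog_run() executes on already-redirected frames. It must
 * be loaded with expected_attach_type == BPF_XDP_DEVMAP (see the check in
 * __dev_map_alloc_node() below); the SEC() name that selects this attach type
 * varies between libbpf versions, so treat it as an assumption.
 */
SEC("xdp/devmap")
int xdp_devmap_egress(struct xdp_md *ctx)
{
	/* For devmap programs, ctx->egress_ifindex identifies the target device. */
	if (ctx->egress_ifindex == 0)
		return XDP_DROP;

	return XDP_PASS;
}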
374 static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
376 struct net_device *dev = bq->dev;
377 unsigned int cnt = bq->count;
378 int sent = 0, err = 0;
385 for (i = 0; i < cnt; i++) {
386 struct xdp_frame *xdpf = bq->q[i];
392 to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
397 sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);
399 /* If ndo_xdp_xmit fails with an errno, no frames have
406 /* If not all frames have been transmitted, it is our
407 * responsibility to free them
409 for (i = sent; unlikely(i < to_send); i++)
410 xdp_return_frame_rx_napi(bq->q[i]);
414 trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err);
417 /* __dev_flush is called from xdp_do_flush(), which _must_ be called by the
418 * driver before returning from its napi->poll() routine. See the comment above
419 * xdp_do_flush() in filter.c, and the driver-side sketch after this function.
421 void __dev_flush(void)
423 struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
424 struct xdp_dev_bulk_queue *bq, *tmp;
426 list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
427 bq_xmit_all(bq, XDP_XMIT_FLUSH);
430 __list_del_clearprev(&bq->flush_node);
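/* Illustrative driver-side sketch, not part of this file: the contract
 * described above __dev_flush(). A driver whose RX loop returned XDP_REDIRECT
 * for any frame must call xdp_do_flush() before leaving its napi->poll(), so
 * that the per-cpu bulk queues filled by bq_enqueue() are drained. Everything
 * here except xdp_do_flush() and the NAPI helpers is made up for the example.
 */
#include <linux/netdevice.h>
#include <linux/filter.h>

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	bool xdp_redirect_seen = false;
	int done = 0;

	/* ... RX processing; running XDP may set xdp_redirect_seen ... */

	if (xdp_redirect_seen)
		xdp_do_flush();

	if (done < budget)
		napi_complete_done(napi, done);

	return done;
}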
434 /* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
435 * by local_bh_disable() (from XDP calls inside NAPI). The
436 * rcu_read_lock_bh_held() below makes lockdep accept both.
438 static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
440 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
441 struct bpf_dtab_netdev *obj;
443 if (key >= map->max_entries)
446 obj = rcu_dereference_check(dtab->netdev_map[key],
447 rcu_read_lock_bh_held());
451 /* Runs in NAPI, i.e., softirq under local_bh_disable(). Thus, safe percpu
452 * variable access, and map elements stick around. See comment above
453 * xdp_do_flush() in filter.c.
455 static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
456 struct net_device *dev_rx, struct bpf_prog *xdp_prog)
458 struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
459 struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
461 if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
464 /* The ingress dev_rx will be the same for all xdp_frames in the
465 * bulk_queue, because the bq is stored per-CPU and must be flushed
466 * at the end of the net_device driver's NAPI function.
468 * The same holds for xdp_prog and flush_list, since these fields
469 * are only ever modified together.
473 bq->xdp_prog = xdp_prog;
474 list_add(&bq->flush_node, flush_list);
477 bq->q[bq->count++] = xdpf;
480 static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
481 struct net_device *dev_rx,
482 struct bpf_prog *xdp_prog)
484 struct xdp_frame *xdpf;
487 if (!dev->netdev_ops->ndo_xdp_xmit)
490 err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
494 xdpf = xdp_convert_buff_to_frame(xdp);
498 bq_enqueue(dev, xdpf, dev_rx, xdp_prog);
502 int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
503 struct net_device *dev_rx)
505 return __xdp_enqueue(dev, xdp, dev_rx, NULL);
508 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
509 struct net_device *dev_rx)
511 struct net_device *dev = dst->dev;
513 return __xdp_enqueue(dev, xdp, dev_rx, dst->xdp_prog);
516 static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_buff *xdp,
519 if (!obj || obj->dev->ifindex == exclude_ifindex ||
520 !obj->dev->netdev_ops->ndo_xdp_xmit)
523 if (xdp_ok_fwd_dev(obj->dev, xdp->data_end - xdp->data))
529 static int dev_map_enqueue_clone(struct bpf_dtab_netdev *obj,
530 struct net_device *dev_rx,
531 struct xdp_frame *xdpf)
533 struct xdp_frame *nxdpf;
535 nxdpf = xdpf_clone(xdpf);
539 bq_enqueue(obj->dev, nxdpf, dev_rx, obj->xdp_prog);
544 int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
545 struct bpf_map *map, bool exclude_ingress)
547 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
548 int exclude_ifindex = exclude_ingress ? dev_rx->ifindex : 0;
549 struct bpf_dtab_netdev *dst, *last_dst = NULL;
550 struct hlist_head *head;
551 struct xdp_frame *xdpf;
555 xdpf = xdp_convert_buff_to_frame(xdp);
559 if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
560 for (i = 0; i < map->max_entries; i++) {
561 dst = rcu_dereference_check(dtab->netdev_map[i],
562 rcu_read_lock_bh_held());
563 if (!is_valid_dst(dst, xdp, exclude_ifindex))
566 /* we only need n-1 clones; last_dst enqueued below */
572 err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
578 } else { /* BPF_MAP_TYPE_DEVMAP_HASH */
579 for (i = 0; i < dtab->n_buckets; i++) {
580 head = dev_map_index_hash(dtab, i);
581 hlist_for_each_entry_rcu(dst, head, index_hlist,
582 lockdep_is_held(&dtab->index_lock)) {
583 if (!is_valid_dst(dst, xdp, exclude_ifindex))
586 /* we only need n-1 clones; last_dst enqueued below */
592 err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
601 /* consume the last copy of the frame */
603 bq_enqueue(last_dst->dev, xdpf, dev_rx, last_dst->xdp_prog);
605 xdp_return_frame_rx_napi(xdpf); /* dtab is empty */
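/* Illustrative BPF-side sketch, not part of this file: the broadcast path
 * implemented by dev_map_enqueue_multi() above is selected by calling the
 * redirect helper with BPF_F_BROADCAST (the key is then ignored);
 * BPF_F_EXCLUDE_INGRESS additionally skips the receiving device. Reuses the
 * hypothetical tx_ports map declared in the first sketch.
 */
SEC("xdp")
int xdp_broadcast(struct xdp_md *ctx)
{
	return bpf_redirect_map(&tx_ports, 0,
				BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
}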
610 int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
611 struct bpf_prog *xdp_prog)
615 err = xdp_ok_fwd_dev(dst->dev, skb->len);
619 generic_xdp_tx(skb, xdp_prog);
624 static int dev_map_redirect_clone(struct bpf_dtab_netdev *dst,
626 struct bpf_prog *xdp_prog)
628 struct sk_buff *nskb;
631 nskb = skb_clone(skb, GFP_ATOMIC);
635 err = dev_map_generic_redirect(dst, nskb, xdp_prog);
644 int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
645 struct bpf_prog *xdp_prog, struct bpf_map *map,
646 bool exclude_ingress)
648 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
649 int exclude_ifindex = exclude_ingress ? dev->ifindex : 0;
650 struct bpf_dtab_netdev *dst, *last_dst = NULL;
651 struct hlist_head *head;
652 struct hlist_node *next;
656 if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
657 for (i = 0; i < map->max_entries; i++) {
658 dst = rcu_dereference_check(dtab->netdev_map[i],
659 rcu_read_lock_bh_held());
660 if (!dst || dst->dev->ifindex == exclude_ifindex)
663 /* we only need n-1 clones; last_dst enqueued below */
669 err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
675 } else { /* BPF_MAP_TYPE_DEVMAP_HASH */
676 for (i = 0; i < dtab->n_buckets; i++) {
677 head = dev_map_index_hash(dtab, i);
678 hlist_for_each_entry_safe(dst, next, head, index_hlist) {
679 if (!dst || dst->dev->ifindex == exclude_ifindex)
682 /* we only need n-1 clones; last_dst enqueued below */
688 err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
697 /* consume the first skb and return */
699 return dev_map_generic_redirect(last_dst, skb, xdp_prog);
706 static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
708 struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
710 return obj ? &obj->val : NULL;
713 static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
715 struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
717 return obj ? &obj->val : NULL;
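/* Illustrative BPF-side sketch, not part of this file: reading a devmap entry
 * directly. dev_map_lookup_elem() above returns a pointer into the element's
 * bpf_devmap_val, and dev_map_init_map() forces BPF_F_RDONLY_PROG, so the
 * verifier rejects writes through that pointer. Reuses the hypothetical
 * 4-byte-value tx_ports map from the first sketch.
 */
SEC("xdp")
int xdp_peek_devmap(struct xdp_md *ctx)
{
	__u32 key = 0;
	__u32 *ifindex;

	ifindex = bpf_map_lookup_elem(&tx_ports, &key);
	if (!ifindex || *ifindex == 0)
		return XDP_PASS;

	return bpf_redirect_map(&tx_ports, key, XDP_PASS);
}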
720 static void __dev_map_entry_free(struct rcu_head *rcu)
722 struct bpf_dtab_netdev *dev;
724 dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
726 bpf_prog_put(dev->xdp_prog);
731 static int dev_map_delete_elem(struct bpf_map *map, void *key)
733 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
734 struct bpf_dtab_netdev *old_dev;
737 if (k >= map->max_entries)
740 old_dev = unrcu_pointer(xchg(&dtab->netdev_map[k], NULL));
742 call_rcu(&old_dev->rcu, __dev_map_entry_free);
746 static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
748 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
749 struct bpf_dtab_netdev *old_dev;
754 spin_lock_irqsave(&dtab->index_lock, flags);
756 old_dev = __dev_map_hash_lookup_elem(map, k);
759 hlist_del_init_rcu(&old_dev->index_hlist);
760 call_rcu(&old_dev->rcu, __dev_map_entry_free);
763 spin_unlock_irqrestore(&dtab->index_lock, flags);
768 static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
769 struct bpf_dtab *dtab,
770 struct bpf_devmap_val *val,
773 struct bpf_prog *prog = NULL;
774 struct bpf_dtab_netdev *dev;
776 dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
777 GFP_ATOMIC | __GFP_NOWARN,
778 dtab->map.numa_node);
780 return ERR_PTR(-ENOMEM);
782 dev->dev = dev_get_by_index(net, val->ifindex);
786 if (val->bpf_prog.fd > 0) {
787 prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
788 BPF_PROG_TYPE_XDP, false);
791 if (prog->expected_attach_type != BPF_XDP_DEVMAP)
798 dev->xdp_prog = prog;
799 dev->val.bpf_prog.id = prog->aux->id;
801 dev->xdp_prog = NULL;
802 dev->val.bpf_prog.id = 0;
804 dev->val.ifindex = val->ifindex;
813 return ERR_PTR(-EINVAL);
816 static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
817 void *key, void *value, u64 map_flags)
819 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
820 struct bpf_dtab_netdev *dev, *old_dev;
821 struct bpf_devmap_val val = {};
824 if (unlikely(map_flags > BPF_EXIST))
826 if (unlikely(i >= dtab->map.max_entries))
828 if (unlikely(map_flags == BPF_NOEXIST))
831 /* already verified value_size <= sizeof val */
832 memcpy(&val, value, map->value_size);
836 /* cannot specify fd if ifindex is 0 */
837 if (val.bpf_prog.fd > 0)
840 dev = __dev_map_alloc_node(net, dtab, &val, i);
845 /* Use call_rcu() here to ensure that RCU critical sections have completed;
846 * remember that the driver-side flush operation will happen before the
847 * net device is removed.
849 old_dev = unrcu_pointer(xchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev)));
851 call_rcu(&old_dev->rcu, __dev_map_entry_free);
856 static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
859 return __dev_map_update_elem(current->nsproxy->net_ns,
860 map, key, value, map_flags);
863 static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
864 void *key, void *value, u64 map_flags)
866 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
867 struct bpf_dtab_netdev *dev, *old_dev;
868 struct bpf_devmap_val val = {};
869 u32 idx = *(u32 *)key;
873 /* already verified value_size <= sizeof val */
874 memcpy(&val, value, map->value_size);
876 if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
879 spin_lock_irqsave(&dtab->index_lock, flags);
881 old_dev = __dev_map_hash_lookup_elem(map, idx);
882 if (old_dev && (map_flags & BPF_NOEXIST))
885 dev = __dev_map_alloc_node(net, dtab, &val, idx);
892 hlist_del_rcu(&old_dev->index_hlist);
894 if (dtab->items >= dtab->map.max_entries) {
895 spin_unlock_irqrestore(&dtab->index_lock, flags);
896 call_rcu(&dev->rcu, __dev_map_entry_free);
902 hlist_add_head_rcu(&dev->index_hlist,
903 dev_map_index_hash(dtab, idx));
904 spin_unlock_irqrestore(&dtab->index_lock, flags);
907 call_rcu(&old_dev->rcu, __dev_map_entry_free);
912 spin_unlock_irqrestore(&dtab->index_lock, flags);
916 static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
919 return __dev_map_hash_update_elem(current->nsproxy->net_ns,
920 map, key, value, map_flags);
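/* Illustrative user-space sketch, not part of this file: a DEVMAP_HASH map is
 * keyed by an arbitrary u32, conventionally the ifindex itself, which the
 * update path above hashes into dev_index_head. bpf_map_create() is the
 * libbpf 0.7+ API; older libbpf used bpf_create_map() instead.
 */
#include <bpf/bpf.h>

static int add_hash_entry_by_ifindex(__u32 ifindex)
{
	int map_fd;

	map_fd = bpf_map_create(BPF_MAP_TYPE_DEVMAP_HASH, "tx_ports_hash",
				sizeof(__u32), sizeof(__u32), 64, NULL);
	if (map_fd < 0)
		return map_fd;

	/* key == value == ifindex for the 4-byte value layout */
	return bpf_map_update_elem(map_fd, &ifindex, &ifindex, BPF_ANY);
}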
923 static int dev_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
925 return __bpf_xdp_redirect_map(map, ifindex, flags,
926 BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
927 __dev_map_lookup_elem);
930 static int dev_hash_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
932 return __bpf_xdp_redirect_map(map, ifindex, flags,
933 BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
934 __dev_map_hash_lookup_elem);
937 static int dev_map_btf_id;
938 const struct bpf_map_ops dev_map_ops = {
939 .map_meta_equal = bpf_map_meta_equal,
940 .map_alloc = dev_map_alloc,
941 .map_free = dev_map_free,
942 .map_get_next_key = dev_map_get_next_key,
943 .map_lookup_elem = dev_map_lookup_elem,
944 .map_update_elem = dev_map_update_elem,
945 .map_delete_elem = dev_map_delete_elem,
946 .map_check_btf = map_check_no_btf,
947 .map_btf_name = "bpf_dtab",
948 .map_btf_id = &dev_map_btf_id,
949 .map_redirect = dev_map_redirect,
952 static int dev_map_hash_map_btf_id;
953 const struct bpf_map_ops dev_map_hash_ops = {
954 .map_meta_equal = bpf_map_meta_equal,
955 .map_alloc = dev_map_alloc,
956 .map_free = dev_map_free,
957 .map_get_next_key = dev_map_hash_get_next_key,
958 .map_lookup_elem = dev_map_hash_lookup_elem,
959 .map_update_elem = dev_map_hash_update_elem,
960 .map_delete_elem = dev_map_hash_delete_elem,
961 .map_check_btf = map_check_no_btf,
962 .map_btf_name = "bpf_dtab",
963 .map_btf_id = &dev_map_hash_map_btf_id,
964 .map_redirect = dev_hash_map_redirect,
967 static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
968 struct net_device *netdev)
973 spin_lock_irqsave(&dtab->index_lock, flags);
974 for (i = 0; i < dtab->n_buckets; i++) {
975 struct bpf_dtab_netdev *dev;
976 struct hlist_head *head;
977 struct hlist_node *next;
979 head = dev_map_index_hash(dtab, i);
981 hlist_for_each_entry_safe(dev, next, head, index_hlist) {
982 if (netdev != dev->dev)
986 hlist_del_rcu(&dev->index_hlist);
987 call_rcu(&dev->rcu, __dev_map_entry_free);
990 spin_unlock_irqrestore(&dtab->index_lock, flags);
993 static int dev_map_notification(struct notifier_block *notifier,
994 ulong event, void *ptr)
996 struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
997 struct bpf_dtab *dtab;
1001 case NETDEV_REGISTER:
1002 if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
1005 /* will be freed in free_netdev() */
1006 netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
1007 if (!netdev->xdp_bulkq)
1010 for_each_possible_cpu(cpu)
1011 per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
1013 case NETDEV_UNREGISTER:
1014 /* This rcu_read_lock/unlock pair is needed both because
1015 * dev_map_list is an RCU list and to ensure a delete
1016 * operation does not free a netdev_map entry while we
1017 * are comparing it against the netdev being unregistered.
1020 list_for_each_entry_rcu(dtab, &dev_map_list, list) {
1021 if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
1022 dev_map_hash_remove_netdev(dtab, netdev);
1026 for (i = 0; i < dtab->map.max_entries; i++) {
1027 struct bpf_dtab_netdev *dev, *odev;
1029 dev = rcu_dereference(dtab->netdev_map[i]);
1030 if (!dev || netdev != dev->dev)
1032 odev = unrcu_pointer(cmpxchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev), NULL));
1035 __dev_map_entry_free);
1046 static struct notifier_block dev_map_notifier = {
1047 .notifier_call = dev_map_notification,
1050 static int __init dev_map_init(void)
1054 /* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
1055 BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
1056 offsetof(struct _bpf_dtab_netdev, dev));
1057 register_netdevice_notifier(&dev_map_notifier);
1059 for_each_possible_cpu(cpu)
1060 INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
1064 subsys_initcall(dev_map_init);