// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 */
/* A devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance, we
 * have put some effort into ensuring that the datapath with redirect maps
 * does not take any locks. This is a quick note on the details.
 *
 * We have three possible paths into the devmap control plane: bpf syscalls,
 * bpf programs, and driver-side xmit/flush operations. A bpf syscall will
 * invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side, xchg() is used to modify the
 * netdev_map array. Because the datapath does a (read-only) lookup into the
 * netdev_map array from an RCU critical section, we use call_rcu() to wait
 * for an RCU grace period before freeing the old data structures. This
 * ensures the datapath always sees a valid copy. However, the datapath also
 * does a "flush" operation that pushes any packets pending in the driver,
 * and that happens outside the RCU critical section. Each bpf_dtab_netdev
 * tracks these pending operations using a per-cpu flush list. The
 * bpf_dtab_netdev object will not be destroyed until this list is empty,
 * indicating that all outstanding flush operations have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are no different from multiple user space
 * threads making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search the map structure for entries holding a
 * reference to the net device being removed and delete them. This is a
 * two-step process: (a) dereference the bpf_dtab_netdev object in netdev_map
 * and (b) check whether its ifindex matches the net_device being removed.
 * When removing the dev, a cmpxchg() is used to ensure the correct dev is
 * removed; with a concurrent update or delete operation it is possible that
 * the initially referenced dev is no longer in the map. As the notifier hook
 * walks the map, we know that new dev references cannot be added by the user
 * because core infrastructure ensures dev_get_by_index() calls will fail at
 * this point.
 */
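/* Example (illustrative sketch, not part of this file): a minimal XDP
 * program that forwards every packet through slot 0 of a devmap via
 * bpf_redirect_map(). The map and program names are made up for the example.
 *
 *        struct bpf_map_def SEC("maps") tx_port = {
 *                .type        = BPF_MAP_TYPE_DEVMAP,
 *                .key_size    = sizeof(__u32),
 *                .value_size  = sizeof(__u32),
 *                .max_entries = 64,
 *        };
 *
 *        SEC("xdp")
 *        int xdp_devmap_redirect(struct xdp_md *ctx)
 *        {
 *                return bpf_redirect_map(&tx_port, 0, 0);
 *        }
 */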
#include <linux/bpf.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>

#define DEV_CREATE_FLAG_MASK \
        (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

#define DEV_MAP_BULK_SIZE 16
struct bpf_dtab_netdev;
struct xdp_bulk_queue {
        struct xdp_frame *q[DEV_MAP_BULK_SIZE];
        struct list_head flush_node;
        struct net_device *dev_rx;
        struct bpf_dtab_netdev *obj;
        unsigned int count;
};
struct bpf_dtab_netdev {
        struct net_device *dev; /* must be first member, due to tracepoint */
        struct bpf_dtab *dtab;
        unsigned int bit;
        struct xdp_bulk_queue __percpu *bulkq;
        struct rcu_head rcu;
};
struct bpf_dtab {
        struct bpf_map map;
        struct bpf_dtab_netdev **netdev_map;
        struct list_head __percpu *flush_list;
        struct list_head list;
};
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
        struct bpf_dtab *dtab;

        if (!capable(CAP_NET_ADMIN))
                return ERR_PTR(-EPERM);

        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
            attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK)
                return ERR_PTR(-EINVAL);

        /* Lookup returns a pointer straight to dev->ifindex, so make sure the
         * verifier prevents writes from the BPF side.
         */
        attr->map_flags |= BPF_F_RDONLY_PROG;
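        /* Example (sketch): creating a devmap from user space with libbpf's
         * bpf_create_map(); per the sanity checks above, both the key (slot
         * index) and the value (ifindex) are 4 bytes. Error handling trimmed.
         *
         *        int map_fd = bpf_create_map(BPF_MAP_TYPE_DEVMAP, sizeof(__u32),
         *                                    sizeof(__u32), 64, 0);
         *        if (map_fd < 0)
         *                perror("bpf_create_map");
         */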
        dtab = kzalloc(sizeof(*dtab), GFP_USER);
        if (!dtab)
                return ERR_PTR(-ENOMEM);

        bpf_map_init_from_attr(&dtab->map, attr);

        /* make sure page count doesn't overflow */
        cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
        cost += sizeof(struct list_head) * num_possible_cpus();

        /* if map size is larger than memlock limit, reject it */
        err = bpf_map_charge_init(&dtab->map.memory, cost);

        dtab->flush_list = alloc_percpu(struct list_head);
        if (!dtab->flush_list)

        for_each_possible_cpu(cpu)
                INIT_LIST_HEAD(per_cpu_ptr(dtab->flush_list, cpu));

        dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
                                              sizeof(struct bpf_dtab_netdev *),
                                              dtab->map.numa_node);
        if (!dtab->netdev_map)

        spin_lock(&dev_map_lock);
        list_add_tail_rcu(&dtab->list, &dev_map_list);
        spin_unlock(&dev_map_lock);

        free_percpu(dtab->flush_list);
        bpf_map_charge_finish(&dtab->map.memory);
static void dev_map_free(struct bpf_map *map)
{
        struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
        /* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
         * so the programs (there can be more than one that used this map) have
         * been disconnected from events. Wait for outstanding critical sections
         * in these programs to complete. The rcu critical section only
         * guarantees no further reads against netdev_map. It does __not__
         * ensure pending flush operations (if any) are complete.
         */
        spin_lock(&dev_map_lock);
        list_del_rcu(&dtab->list);
        spin_unlock(&dev_map_lock);

        bpf_clear_redirect_map(map);
        /* Make sure prior __dev_map_entry_free() callbacks have completed. */
        /* To ensure all pending flush operations have completed, wait for the
         * flush list to empty on _all_ cpus.
         * Because the above synchronize_rcu() ensures the map is disconnected
         * from the program, we can assume no new items will be added to the
         * list.
         */
        for_each_online_cpu(cpu) {
                struct list_head *flush_list = per_cpu_ptr(dtab->flush_list, cpu);

                while (!list_empty(flush_list))
                        cond_resched();
        }

        for (i = 0; i < dtab->map.max_entries; i++) {
                struct bpf_dtab_netdev *dev;

                dev = dtab->netdev_map[i];

                free_percpu(dev->bulkq);

        free_percpu(dtab->flush_list);
        bpf_map_area_free(dtab->netdev_map);
static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
        struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
        u32 index = key ? *(u32 *)key : U32_MAX;
        u32 *next = next_key;

        if (index >= dtab->map.max_entries) {

        if (index == dtab->map.max_entries - 1)
static int bq_xmit_all(struct xdp_bulk_queue *bq, u32 flags,
                       bool in_napi_ctx)
{
        struct bpf_dtab_netdev *obj = bq->obj;
        struct net_device *dev = obj->dev;
        int sent = 0, drops = 0, err = 0;

        if (unlikely(!bq->count))
                return 0;

        for (i = 0; i < bq->count; i++) {
                struct xdp_frame *xdpf = bq->q[i];

        sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);

        drops = bq->count - sent;

        trace_xdp_devmap_xmit(&obj->dtab->map, obj->bit,
                              sent, drops, bq->dev_rx, dev, err);

        __list_del_clearprev(&bq->flush_node);
        /* If ndo_xdp_xmit fails with an errno, no frames have been
         * xmit'ed and it's our responsibility to free them all.
         */
        for (i = 0; i < bq->count; i++) {
                struct xdp_frame *xdpf = bq->q[i];

                /* RX path under NAPI protection, can return frames faster */
                if (likely(in_napi_ctx))
                        xdp_return_frame_rx_napi(xdpf);
                else
                        xdp_return_frame(xdpf);
        }
/* __dev_map_flush is called from xdp_do_flush_map(), which _must_ be called
 * by the driver before returning from its napi->poll() routine. The poll()
 * routine is called either from busy_poll context or from net_rx_action
 * signaled from NET_RX_SOFTIRQ. Either way the poll routine must complete
 * before the net device can be torn down. On devmap tear down we ensure the
 * flush list is empty before completing, so that all flush operations have
 * finished.
 */
void __dev_map_flush(struct bpf_map *map)
{
        struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
        struct list_head *flush_list = this_cpu_ptr(dtab->flush_list);
        struct xdp_bulk_queue *bq, *tmp;

        list_for_each_entry_safe(bq, tmp, flush_list, flush_node)
                bq_xmit_all(bq, XDP_XMIT_FLUSH, true);
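/* Example (sketch): the expected driver-side pattern. A driver supporting
 * XDP_REDIRECT calls xdp_do_flush_map() at the end of its NAPI poll cycle,
 * so the per-cpu flush list above is drained before poll() returns. The
 * driver function names here are illustrative.
 *
 *        static int mydrv_napi_poll(struct napi_struct *napi, int budget)
 *        {
 *                int done = mydrv_clean_rx_irq(napi, budget);
 *
 *                if (done < budget)
 *                        napi_complete_done(napi, done);
 *                xdp_do_flush_map();
 *                return done;
 *        }
 */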
/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete
 * and/or update happens in parallel here, a dev_put() won't happen until
 * after reading the ifindex.
 */
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
        struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
        struct bpf_dtab_netdev *obj;

        if (key >= map->max_entries)

        obj = READ_ONCE(dtab->netdev_map[key]);
/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
                      struct net_device *dev_rx)
{
        struct list_head *flush_list = this_cpu_ptr(obj->dtab->flush_list);
        struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);

        if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
                bq_xmit_all(bq, 0, true);
        /* Ingress dev_rx will be the same for all xdp_frame's in the
         * bulk_queue, because bq is stored per-CPU and must be flushed
         * at the end of the net_device driver's NAPI function.
         */
        bq->q[bq->count++] = xdpf;

        if (!bq->flush_node.prev)
                list_add(&bq->flush_node, flush_list);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
                    struct net_device *dev_rx)
{
        struct net_device *dev = dst->dev;
        struct xdp_frame *xdpf;

        if (!dev->netdev_ops->ndo_xdp_xmit)

        err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);

        xdpf = convert_to_xdp_frame(xdp);

        return bq_enqueue(dst, xdpf, dev_rx);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
                             struct bpf_prog *xdp_prog)
{
        err = xdp_ok_fwd_dev(dst->dev, skb->len);

        generic_xdp_tx(skb, xdp_prog);
static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
        struct net_device *dev = obj ? obj->dev : NULL;

        return dev ? &dev->ifindex : NULL;
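/* Example (sketch): from user space, a lookup returns the stored ifindex,
 * or fails with -ENOENT if the slot is empty. Names are illustrative.
 *
 *        __u32 slot = 0, ifindex;
 *
 *        if (!bpf_map_lookup_elem(map_fd, &slot, &ifindex))
 *                printf("slot %u -> ifindex %u\n", slot, ifindex);
 */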
static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
{
        if (dev->dev->netdev_ops->ndo_xdp_xmit) {
                struct xdp_bulk_queue *bq;

                for_each_online_cpu(cpu) {
                        bq = per_cpu_ptr(dev->bulkq, cpu);
                        bq_xmit_all(bq, XDP_XMIT_FLUSH, false);
static void __dev_map_entry_free(struct rcu_head *rcu)
{
        struct bpf_dtab_netdev *dev;

        dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
        dev_map_flush_old(dev);
        free_percpu(dev->bulkq);
static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
        struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
        struct bpf_dtab_netdev *old_dev;

        if (k >= map->max_entries)
        /* Use call_rcu() here to ensure any rcu critical sections have
         * completed, but this does not guarantee a flush has happened
         * yet, because the driver-side rcu_read_lock/unlock only protects
         * the running XDP program. For pending flush operations, the dev
         * and ctx are stored in another per-cpu structure. Additionally,
         * driver tear down ensures all soft irqs are complete before the
         * net device is removed once dev_put() drops the refcount to zero.
         */
        old_dev = xchg(&dtab->netdev_map[k], NULL);
        if (old_dev)
                call_rcu(&old_dev->rcu, __dev_map_entry_free);
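/* Example (sketch): deleting a slot from user space simply clears it; the
 * kernel side above defers freeing via call_rcu(). Names are illustrative.
 *
 *        __u32 slot = 0;
 *
 *        if (bpf_map_delete_elem(map_fd, &slot))
 *                perror("bpf_map_delete_elem");
 */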
static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
                               u64 map_flags)
{
        struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
        struct net *net = current->nsproxy->net_ns;
        gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
        struct bpf_dtab_netdev *dev, *old_dev;
        u32 ifindex = *(u32 *)value;
        struct xdp_bulk_queue *bq;

        if (unlikely(map_flags > BPF_EXIST))
                return -EINVAL;
        if (unlikely(i >= dtab->map.max_entries))
                return -E2BIG;
        if (unlikely(map_flags == BPF_NOEXIST))
                return -EEXIST;

        dev = kmalloc_node(sizeof(*dev), gfp, map->numa_node);

        dev->bulkq = __alloc_percpu_gfp(sizeof(*dev->bulkq),
                                        sizeof(void *), gfp);

        for_each_possible_cpu(cpu) {
                bq = per_cpu_ptr(dev->bulkq, cpu);

        dev->dev = dev_get_by_index(net, ifindex);
        if (!dev->dev) {
                free_percpu(dev->bulkq);
        /* Use call_rcu() here to ensure rcu critical sections have completed.
         * Remember that the driver-side flush operation will happen before the
         * net device is removed.
         */
        old_dev = xchg(&dtab->netdev_map[i], dev);
        if (old_dev)
                call_rcu(&old_dev->rcu, __dev_map_entry_free);
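/* Example (sketch): populating a devmap slot from user space; the value is
 * the ifindex of the target device. Names are illustrative.
 *
 *        __u32 slot = 0;
 *        __u32 ifindex = if_nametoindex("eth1");
 *
 *        if (bpf_map_update_elem(map_fd, &slot, &ifindex, BPF_ANY))
 *                perror("bpf_map_update_elem");
 */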
const struct bpf_map_ops dev_map_ops = {
        .map_alloc = dev_map_alloc,
        .map_free = dev_map_free,
        .map_get_next_key = dev_map_get_next_key,
        .map_lookup_elem = dev_map_lookup_elem,
        .map_update_elem = dev_map_update_elem,
        .map_delete_elem = dev_map_delete_elem,
        .map_check_btf = map_check_no_btf,
};
static int dev_map_notification(struct notifier_block *notifier,
                                ulong event, void *ptr)
{
        struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
        struct bpf_dtab *dtab;

        switch (event) {
        case NETDEV_UNREGISTER:
                /* This rcu_read_lock/unlock pair is needed because
                 * dev_map_list is an RCU list AND to ensure a delete
                 * operation does not free a netdev_map entry while we
                 * are comparing it against the netdev being unregistered.
                 */
                list_for_each_entry_rcu(dtab, &dev_map_list, list) {
                        for (i = 0; i < dtab->map.max_entries; i++) {
                                struct bpf_dtab_netdev *dev, *odev;

                                dev = READ_ONCE(dtab->netdev_map[i]);
                                if (!dev || netdev != dev->dev)
                                        continue;
                                odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
                                if (dev == odev)
                                        call_rcu(&dev->rcu,
                                                 __dev_map_entry_free);
static struct notifier_block dev_map_notifier = {
        .notifier_call = dev_map_notification,
};
static int __init dev_map_init(void)
{
        /* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
        BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
                     offsetof(struct _bpf_dtab_netdev, dev));
        register_netdevice_notifier(&dev_map_notifier);
        return 0;
}

subsys_initcall(dev_map_init);