// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2020 Facebook Inc.

#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <net/udp_tunnel.h>

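/* Per-entry state flags. ADD and DEL mark an operation queued towards
 * the device, OP_FAIL records that the last operation on the entry
 * failed, and FROZEN temporarily excludes the entry from use count
 * adjustments (used during replay).
 */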
enum udp_tunnel_nic_table_entry_flags {
        UDP_TUNNEL_NIC_ENTRY_ADD        = BIT(0),
        UDP_TUNNEL_NIC_ENTRY_DEL        = BIT(1),
        UDP_TUNNEL_NIC_ENTRY_OP_FAIL    = BIT(2),
        UDP_TUNNEL_NIC_ENTRY_FROZEN     = BIT(3),
};

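/* State of a single offloaded port. @use_cnt counts how many tunnel
 * sockets currently reference this port; @hw_priv is a driver-private
 * byte set via the set_port_priv op.
 */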
struct udp_tunnel_nic_table_entry {
        __be16 port;
        u8 type;
        u8 use_cnt;
        u8 flags;
        u8 hw_priv;
};

/**
 * struct udp_tunnel_nic - UDP tunnel port offload state
 * @work:	async work for talking to hardware from process context
 * @dev:	netdev pointer
 * @need_sync:	at least one port state changed
 * @need_replay: space was freed, we need a replay of all ports
 * @work_pending: @work is currently scheduled
 * @n_tables:	number of tables under @entries
 * @missed:	bitmap of tables which overflowed
 * @entries:	table of tables of ports currently offloaded
 */
struct udp_tunnel_nic {
        struct work_struct work;

        struct net_device *dev;

        u8 need_sync:1;
        u8 need_replay:1;
        u8 work_pending:1;

        unsigned int n_tables;
        unsigned long missed;
        struct udp_tunnel_nic_table_entry **entries;
};

/* We ensure all work structs are done using the driver state, but the
 * work may still be executing this module's code, so we need a workqueue
 * we can flush before the module gets removed.
 */
static struct workqueue_struct *udp_tunnel_nic_workqueue;

static const char *udp_tunnel_nic_tunnel_type_name(unsigned int type)
{
        switch (type) {
        case UDP_TUNNEL_TYPE_VXLAN:
                return "vxlan";
        case UDP_TUNNEL_TYPE_GENEVE:
                return "geneve";
        case UDP_TUNNEL_TYPE_VXLAN_GPE:
                return "vxlan-gpe";
        default:
                return "unknown";
        }
}

static bool
udp_tunnel_nic_entry_is_free(struct udp_tunnel_nic_table_entry *entry)
{
        return entry->use_cnt == 0 && !entry->flags;
}

static bool
udp_tunnel_nic_entry_is_frozen(struct udp_tunnel_nic_table_entry *entry)
{
        return entry->flags & UDP_TUNNEL_NIC_ENTRY_FROZEN;
}

static void
udp_tunnel_nic_entry_freeze_used(struct udp_tunnel_nic_table_entry *entry)
{
        if (!udp_tunnel_nic_entry_is_free(entry))
                entry->flags |= UDP_TUNNEL_NIC_ENTRY_FROZEN;
}

static void
udp_tunnel_nic_entry_unfreeze(struct udp_tunnel_nic_table_entry *entry)
{
        entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_FROZEN;
}

static bool
udp_tunnel_nic_entry_is_queued(struct udp_tunnel_nic_table_entry *entry)
{
        return entry->flags & (UDP_TUNNEL_NIC_ENTRY_ADD |
                               UDP_TUNNEL_NIC_ENTRY_DEL);
}

static void
udp_tunnel_nic_entry_queue(struct udp_tunnel_nic *utn,
                           struct udp_tunnel_nic_table_entry *entry,
                           unsigned int flag)
{
        entry->flags |= flag;
        utn->need_sync = 1;
}

static void
udp_tunnel_nic_ti_from_entry(struct udp_tunnel_nic_table_entry *entry,
                             struct udp_tunnel_info *ti)
{
        memset(ti, 0, sizeof(*ti));
        ti->port = entry->port;
        ti->type = entry->type;
        ti->hw_priv = entry->hw_priv;
}

static bool
udp_tunnel_nic_is_empty(struct net_device *dev, struct udp_tunnel_nic *utn)
{
        const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
        unsigned int i, j;

        for (i = 0; i < utn->n_tables; i++)
                for (j = 0; j < info->tables[i].n_entries; j++)
                        if (!udp_tunnel_nic_entry_is_free(&utn->entries[i][j]))
                                return false;
        return true;
}

static bool
udp_tunnel_nic_should_replay(struct net_device *dev, struct udp_tunnel_nic *utn)
{
        const struct udp_tunnel_nic_table_info *table;
        unsigned int i, j;

        if (!utn->missed)
                return false;

        for (i = 0; i < utn->n_tables; i++) {
                table = &dev->udp_tunnel_nic_info->tables[i];
                if (!test_bit(i, &utn->missed))
                        continue;

                for (j = 0; j < table->n_entries; j++)
                        if (udp_tunnel_nic_entry_is_free(&utn->entries[i][j]))
                                return true;
        }

        return false;
}

static void
__udp_tunnel_nic_get_port(struct net_device *dev, unsigned int table,
                          unsigned int idx, struct udp_tunnel_info *ti)
{
        struct udp_tunnel_nic_table_entry *entry;
        struct udp_tunnel_nic *utn;

        utn = dev->udp_tunnel_nic;
        entry = &utn->entries[table][idx];

        if (entry->use_cnt)
                udp_tunnel_nic_ti_from_entry(entry, ti);
}

static void
__udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table,
                               unsigned int idx, u8 priv)
{
        dev->udp_tunnel_nic->entries[table][idx].hw_priv = priv;
}

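/* Record the result of an add/del operation. For entries which already
 * failed an op ("dodgy" entries), -EEXIST on add and -ENOENT on delete
 * count as success, since the device may in fact already be in the
 * requested state.
 */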
static void
udp_tunnel_nic_entry_update_done(struct udp_tunnel_nic_table_entry *entry,
                                 int err)
{
        bool dodgy = entry->flags & UDP_TUNNEL_NIC_ENTRY_OP_FAIL;

        WARN_ON_ONCE(entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD &&
                     entry->flags & UDP_TUNNEL_NIC_ENTRY_DEL);

        if (entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD &&
            (!err || (err == -EEXIST && dodgy)))
                entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_ADD;

        if (entry->flags & UDP_TUNNEL_NIC_ENTRY_DEL &&
            (!err || (err == -ENOENT && dodgy)))
                entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_DEL;

        if (!err)
                entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_OP_FAIL;
        else
                entry->flags |= UDP_TUNNEL_NIC_ENTRY_OP_FAIL;
}

static void
udp_tunnel_nic_device_sync_one(struct net_device *dev,
                               struct udp_tunnel_nic *utn,
                               unsigned int table, unsigned int idx)
{
        struct udp_tunnel_nic_table_entry *entry;
        struct udp_tunnel_info ti;
        int err;

        entry = &utn->entries[table][idx];
        if (!udp_tunnel_nic_entry_is_queued(entry))
                return;

        udp_tunnel_nic_ti_from_entry(entry, &ti);
        if (entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD)
                err = dev->udp_tunnel_nic_info->set_port(dev, table, idx, &ti);
        else
                err = dev->udp_tunnel_nic_info->unset_port(dev, table, idx,
                                                           &ti);
        udp_tunnel_nic_entry_update_done(entry, err);

        if (err)
                netdev_warn(dev,
                            "UDP tunnel port sync failed port %d type %s: %d\n",
                            be16_to_cpu(entry->port),
                            udp_tunnel_nic_tunnel_type_name(entry->type),
                            err);
}

static void
udp_tunnel_nic_device_sync_by_port(struct net_device *dev,
                                   struct udp_tunnel_nic *utn)
{
        const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
        unsigned int i, j;

        for (i = 0; i < utn->n_tables; i++)
                for (j = 0; j < info->tables[i].n_entries; j++)
                        udp_tunnel_nic_device_sync_one(dev, utn, i, j);
}

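/* Table-based sync: the driver gets a single ->sync_table() call per
 * table containing queued entries, and every queued entry in that table
 * shares the resulting error code.
 */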
static void
udp_tunnel_nic_device_sync_by_table(struct net_device *dev,
                                    struct udp_tunnel_nic *utn)
{
        const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
        unsigned int i, j;
        int err;

        for (i = 0; i < utn->n_tables; i++) {
                /* Find something that needs sync in this table */
                for (j = 0; j < info->tables[i].n_entries; j++)
                        if (udp_tunnel_nic_entry_is_queued(&utn->entries[i][j]))
                                break;
                if (j == info->tables[i].n_entries)
                        continue;

                err = info->sync_table(dev, i);
                if (err)
                        netdev_warn(dev, "UDP tunnel port sync failed for table %d: %d\n",
                                    i, err);

                for (j = 0; j < info->tables[i].n_entries; j++) {
                        struct udp_tunnel_nic_table_entry *entry;

                        entry = &utn->entries[i][j];
                        if (udp_tunnel_nic_entry_is_queued(entry))
                                udp_tunnel_nic_entry_update_done(entry, err);
                }
        }
}

static void
__udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn)
{
        if (!utn->need_sync)
                return;

        if (dev->udp_tunnel_nic_info->sync_table)
                udp_tunnel_nic_device_sync_by_table(dev, utn);
        else
                udp_tunnel_nic_device_sync_by_port(dev, utn);

        utn->need_sync = 0;
        /* Can't replay directly here, in case we come from the tunnel driver's
         * notification - trying to replay may deadlock inside tunnel driver.
         */
        utn->need_replay = udp_tunnel_nic_should_replay(dev, utn);
}

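/* Sync entry point used from the add/del paths. Drivers which may sleep
 * in their callbacks are always serviced from the workqueue; others are
 * synced inline, with the workqueue used only when a replay is needed.
 */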
static void
udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn)
{
        const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
        bool may_sleep;

        if (!utn->need_sync)
                return;

        /* Drivers which sleep in the callback need to update from
         * the workqueue, if we come from the tunnel driver's notification.
         */
        may_sleep = info->flags & UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
        if (!may_sleep)
                __udp_tunnel_nic_device_sync(dev, utn);
        if (may_sleep || utn->need_replay) {
                queue_work(udp_tunnel_nic_workqueue, &utn->work);
                utn->work_pending = 1;
        }
}

static bool
udp_tunnel_nic_table_is_capable(const struct udp_tunnel_nic_table_info *table,
                                struct udp_tunnel_info *ti)
{
        return table->tunnel_types & ti->type;
}

static bool
udp_tunnel_nic_is_capable(struct net_device *dev, struct udp_tunnel_nic *utn,
                          struct udp_tunnel_info *ti)
{
        const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
        unsigned int i;

        /* Special case IPv4-only NICs */
        if (info->flags & UDP_TUNNEL_NIC_INFO_IPV4_ONLY &&
            ti->sa_family != AF_INET)
                return true;

        for (i = 0; i < utn->n_tables; i++)
                if (udp_tunnel_nic_table_is_capable(&info->tables[i], ti))
                        return true;
        return false;
}

static bool
udp_tunnel_nic_has_collision(struct net_device *dev, struct udp_tunnel_nic *utn,
                             struct udp_tunnel_info *ti)
{
        const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
        struct udp_tunnel_nic_table_entry *entry;
        unsigned int i, j;

        for (i = 0; i < utn->n_tables; i++)
                for (j = 0; j < info->tables[i].n_entries; j++) {
                        entry = &utn->entries[i][j];

                        if (!udp_tunnel_nic_entry_is_free(entry) &&
                            entry->port == ti->port &&
                            entry->type != ti->type) {
                                __set_bit(i, &utn->missed);
                                return true;
                        }
                }
        return false;
}

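/* Adjust an entry's use count. Only transitions between zero and
 * non-zero use counts (or retries of failed ops) reach the device;
 * a queued opposite op which was not sent yet is simply cancelled.
 */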
static void
udp_tunnel_nic_entry_adj(struct udp_tunnel_nic *utn,
                         unsigned int table, unsigned int idx, int use_cnt_adj)
{
        struct udp_tunnel_nic_table_entry *entry = &utn->entries[table][idx];
        bool dodgy = entry->flags & UDP_TUNNEL_NIC_ENTRY_OP_FAIL;
        unsigned int from, to;

        /* If not going from used to unused or vice versa - all done.
         * For dodgy entries make sure we try to sync again (queue the entry).
         */
        entry->use_cnt += use_cnt_adj;
        if (!dodgy && !entry->use_cnt == !(entry->use_cnt - use_cnt_adj))
                return;

        /* Cancel the op before it was sent to the device, if possible,
         * otherwise we'd need to take special care to issue commands
         * in the same order the ports arrived.
         */
        if (use_cnt_adj < 0) {
                from = UDP_TUNNEL_NIC_ENTRY_ADD;
                to = UDP_TUNNEL_NIC_ENTRY_DEL;
        } else {
                from = UDP_TUNNEL_NIC_ENTRY_DEL;
                to = UDP_TUNNEL_NIC_ENTRY_ADD;
        }

        if (entry->flags & from) {
                entry->flags &= ~from;
                if (!dodgy)
                        return;
        }

        udp_tunnel_nic_entry_queue(utn, entry, to);
}

static bool
udp_tunnel_nic_entry_try_adj(struct udp_tunnel_nic *utn,
                             unsigned int table, unsigned int idx,
                             struct udp_tunnel_info *ti, int use_cnt_adj)
{
        struct udp_tunnel_nic_table_entry *entry = &utn->entries[table][idx];

        if (udp_tunnel_nic_entry_is_free(entry) ||
            entry->port != ti->port ||
            entry->type != ti->type)
                return false;

        if (udp_tunnel_nic_entry_is_frozen(entry))
                return true;

        udp_tunnel_nic_entry_adj(utn, table, idx, use_cnt_adj);
        return true;
}

/* Try to find an existing matching entry and adjust its use count, instead
 * of adding a new one. Returns true if an entry was found. In case of delete
 * the entry may have gotten removed in the process, in which case it will be
 * queued for removal.
 */
static bool
udp_tunnel_nic_try_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
                            struct udp_tunnel_info *ti, int use_cnt_adj)
{
        const struct udp_tunnel_nic_table_info *table;
        unsigned int i, j;

        for (i = 0; i < utn->n_tables; i++) {
                table = &dev->udp_tunnel_nic_info->tables[i];
                if (!udp_tunnel_nic_table_is_capable(table, ti))
                        continue;

                for (j = 0; j < table->n_entries; j++)
                        if (udp_tunnel_nic_entry_try_adj(utn, i, j, ti,
                                                         use_cnt_adj))
                                return true;
        }

        return false;
}

static bool
udp_tunnel_nic_add_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
                            struct udp_tunnel_info *ti)
{
        return udp_tunnel_nic_try_existing(dev, utn, ti, +1);
}

static bool
udp_tunnel_nic_del_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
                            struct udp_tunnel_info *ti)
{
        return udp_tunnel_nic_try_existing(dev, utn, ti, -1);
}

static bool
udp_tunnel_nic_add_new(struct net_device *dev, struct udp_tunnel_nic *utn,
                       struct udp_tunnel_info *ti)
{
        const struct udp_tunnel_nic_table_info *table;
        unsigned int i, j;

        for (i = 0; i < utn->n_tables; i++) {
                table = &dev->udp_tunnel_nic_info->tables[i];
                if (!udp_tunnel_nic_table_is_capable(table, ti))
                        continue;

                for (j = 0; j < table->n_entries; j++) {
                        struct udp_tunnel_nic_table_entry *entry;

                        entry = &utn->entries[i][j];
                        if (!udp_tunnel_nic_entry_is_free(entry))
                                continue;

                        entry->port = ti->port;
                        entry->type = ti->type;
                        entry->use_cnt = 1;
                        udp_tunnel_nic_entry_queue(utn, entry,
                                                   UDP_TUNNEL_NIC_ENTRY_ADD);
                        return true;
                }

                /* A different table may still fit this port in, but no
                 * current device has multiple tables accepting the same
                 * tunnel type, and false positives are okay.
                 */
                __set_bit(i, &utn->missed);
        }

        return false;
}

static void
__udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti)
{
        const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
        struct udp_tunnel_nic *utn;

        utn = dev->udp_tunnel_nic;
        if (!utn)
                return;
        if (!netif_running(dev) && info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY)
                return;

        if (!udp_tunnel_nic_is_capable(dev, utn, ti))
                return;

        /* It may happen that a tunnel of one type is removed and a different
         * tunnel type tries to reuse its port before the device was informed.
         * Rely on utn->missed to re-add this port later.
         */
        if (udp_tunnel_nic_has_collision(dev, utn, ti))
                return;

        if (!udp_tunnel_nic_add_existing(dev, utn, ti))
                udp_tunnel_nic_add_new(dev, utn, ti);

        udp_tunnel_nic_device_sync(dev, utn);
}

static void
__udp_tunnel_nic_del_port(struct net_device *dev, struct udp_tunnel_info *ti)
{
        struct udp_tunnel_nic *utn;

        utn = dev->udp_tunnel_nic;
        if (!utn)
                return;

        if (!udp_tunnel_nic_is_capable(dev, utn, ti))
                return;

        udp_tunnel_nic_del_existing(dev, utn, ti);

        udp_tunnel_nic_device_sync(dev, utn);
}

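/* Device reset notification: the device lost its state, so drop pending
 * deletes and failure flags, re-queue an ADD for every port still in
 * use, and sync.
 */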
static void __udp_tunnel_nic_reset_ntf(struct net_device *dev)
{
        const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
        struct udp_tunnel_nic *utn;
        unsigned int i, j;

        ASSERT_RTNL();

        utn = dev->udp_tunnel_nic;
        if (!utn)
                return;

        utn->need_sync = false;
        for (i = 0; i < utn->n_tables; i++)
                for (j = 0; j < info->tables[i].n_entries; j++) {
                        struct udp_tunnel_nic_table_entry *entry;

                        entry = &utn->entries[i][j];

                        entry->flags &= ~(UDP_TUNNEL_NIC_ENTRY_DEL |
                                          UDP_TUNNEL_NIC_ENTRY_OP_FAIL);
                        /* We don't release rtnl across ops */
                        WARN_ON(entry->flags & UDP_TUNNEL_NIC_ENTRY_FROZEN);
                        if (!entry->use_cnt)
                                continue;

                        udp_tunnel_nic_entry_queue(utn, entry,
                                                   UDP_TUNNEL_NIC_ENTRY_ADD);
                }

        __udp_tunnel_nic_device_sync(dev, utn);
}

static const struct udp_tunnel_nic_ops __udp_tunnel_nic_ops = {
        .get_port       = __udp_tunnel_nic_get_port,
        .set_port_priv  = __udp_tunnel_nic_set_port_priv,
        .add_port       = __udp_tunnel_nic_add_port,
        .del_port       = __udp_tunnel_nic_del_port,
        .reset_ntf      = __udp_tunnel_nic_reset_ntf,
};

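/* Driver-side usage sketch (hypothetical "foo" driver, illustrative names
 * only): a driver advertises its port tables by pointing
 * dev->udp_tunnel_nic_info at a static description before register_netdev():
 *
 *	static const struct udp_tunnel_nic_info foo_udp_tunnels = {
 *		.set_port	= foo_udp_tunnel_set_port,
 *		.unset_port	= foo_udp_tunnel_unset_port,
 *		.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
 *		.tables		= {
 *			{
 *				.n_entries	= 4,
 *				.tunnel_types	= UDP_TUNNEL_TYPE_VXLAN |
 *						  UDP_TUNNEL_TYPE_GENEVE,
 *			},
 *		},
 *	};
 */
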
static void
udp_tunnel_nic_flush(struct net_device *dev, struct udp_tunnel_nic *utn)
{
        const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
        unsigned int i, j;

        for (i = 0; i < utn->n_tables; i++)
                for (j = 0; j < info->tables[i].n_entries; j++) {
                        int adj_cnt = -utn->entries[i][j].use_cnt;

                        if (adj_cnt)
                                udp_tunnel_nic_entry_adj(utn, i, j, adj_cnt);
                }

        __udp_tunnel_nic_device_sync(dev, utn);

        for (i = 0; i < utn->n_tables; i++)
                memset(utn->entries[i], 0, array_size(info->tables[i].n_entries,
                                                      sizeof(**utn->entries)));
        WARN_ON(utn->need_sync);
        utn->need_replay = 0;
}

static void
udp_tunnel_nic_replay(struct net_device *dev, struct udp_tunnel_nic *utn)
{
        const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
        unsigned int i, j;

        /* Freeze all the ports we are already tracking so that the replay
         * does not double up the refcount.
         */
        for (i = 0; i < utn->n_tables; i++)
                for (j = 0; j < info->tables[i].n_entries; j++)
                        udp_tunnel_nic_entry_freeze_used(&utn->entries[i][j]);
        utn->missed = 0;
        utn->need_replay = 0;

        udp_tunnel_get_rx_info(dev);

        for (i = 0; i < utn->n_tables; i++)
                for (j = 0; j < info->tables[i].n_entries; j++)
                        udp_tunnel_nic_entry_unfreeze(&utn->entries[i][j]);
}

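/* Process context work: sync under rtnl_lock and perform any replay
 * which could not be run directly from the tunnel driver's notification.
 */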
static void udp_tunnel_nic_device_sync_work(struct work_struct *work)
{
        struct udp_tunnel_nic *utn =
                container_of(work, struct udp_tunnel_nic, work);

        rtnl_lock();
        utn->work_pending = 0;
        __udp_tunnel_nic_device_sync(utn->dev, utn);

        if (utn->need_replay)
                udp_tunnel_nic_replay(utn->dev, utn);
        rtnl_unlock();
}

static struct udp_tunnel_nic *
udp_tunnel_nic_alloc(const struct udp_tunnel_nic_info *info,
                     unsigned int n_tables)
{
        struct udp_tunnel_nic *utn;
        unsigned int i;

        utn = kzalloc(sizeof(*utn), GFP_KERNEL);
        if (!utn)
                return NULL;
        utn->n_tables = n_tables;
        INIT_WORK(&utn->work, udp_tunnel_nic_device_sync_work);

        utn->entries = kmalloc_array(n_tables, sizeof(void *), GFP_KERNEL);
        if (!utn->entries)
                goto err_free_utn;

        for (i = 0; i < n_tables; i++) {
                utn->entries[i] = kcalloc(info->tables[i].n_entries,
                                          sizeof(*utn->entries[i]), GFP_KERNEL);
                if (!utn->entries[i])
                        goto err_free_prev_entries;
        }

        return utn;

err_free_prev_entries:
        while (i--)
                kfree(utn->entries[i]);
        kfree(utn->entries);
err_free_utn:
        kfree(utn);
        return NULL;
}

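/* Sanity-check the driver's info struct: set_port and unset_port must
 * come as a pair, exactly one of the per-port and per-table sync
 * methods must be provided, and tables must be contiguous from index 0.
 */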
static int udp_tunnel_nic_register(struct net_device *dev)
{
        const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
        struct udp_tunnel_nic *utn;
        unsigned int n_tables, i;

        BUILD_BUG_ON(sizeof(utn->missed) * BITS_PER_BYTE <
                     UDP_TUNNEL_NIC_MAX_TABLES);

        if (WARN_ON(!info->set_port != !info->unset_port) ||
            WARN_ON(!info->set_port == !info->sync_table) ||
            WARN_ON(!info->tables[0].n_entries))
                return -EINVAL;

        n_tables = 1;
        for (i = 1; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
                if (!info->tables[i].n_entries)
                        continue;

                n_tables++;
                if (WARN_ON(!info->tables[i - 1].n_entries))
                        return -EINVAL;
        }

        utn = udp_tunnel_nic_alloc(info, n_tables);
        if (!utn)
                return -ENOMEM;

        utn->dev = dev;
        dev_hold(dev);
        dev->udp_tunnel_nic = utn;

        if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY))
                udp_tunnel_get_rx_info(dev);

        return 0;
}

static void
udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn)
{
        unsigned int i;

        /* Flush before we check work, so we don't waste time adding entries
         * from the work which we will boot immediately.
         */
        udp_tunnel_nic_flush(dev, utn);

        /* Wait for the work to be done using the state, netdev core will
         * retry unregister until we give up our reference on this device.
         */
        if (utn->work_pending)
                return;

        for (i = 0; i < utn->n_tables; i++)
                kfree(utn->entries[i]);
        kfree(utn->entries);
        kfree(utn);
        dev->udp_tunnel_nic = NULL;
        dev_put(dev);
}

static int
udp_tunnel_nic_netdevice_event(struct notifier_block *unused,
                               unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        const struct udp_tunnel_nic_info *info;
        struct udp_tunnel_nic *utn;

        info = dev->udp_tunnel_nic_info;
        if (!info)
                return NOTIFY_DONE;

        if (event == NETDEV_REGISTER) {
                int err;

                err = udp_tunnel_nic_register(dev);
                if (err)
                        netdev_WARN(dev, "failed to register for UDP tunnel offloads: %d", err);
                return notifier_from_errno(err);
        }
        /* All other events will need the udp_tunnel_nic state */
        utn = dev->udp_tunnel_nic;
        if (!utn)
                return NOTIFY_DONE;

        if (event == NETDEV_UNREGISTER) {
                udp_tunnel_nic_unregister(dev, utn);
                return NOTIFY_OK;
        }

        /* All other events only matter if the NIC has to be programmed while open */
        if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY))
                return NOTIFY_DONE;

        if (event == NETDEV_UP) {
                WARN_ON(!udp_tunnel_nic_is_empty(dev, utn));
                udp_tunnel_get_rx_info(dev);
                return NOTIFY_OK;
        }
        if (event == NETDEV_GOING_DOWN) {
                udp_tunnel_nic_flush(dev, utn);
                return NOTIFY_OK;
        }

        return NOTIFY_DONE;
}

static struct notifier_block udp_tunnel_nic_notifier_block __read_mostly = {
        .notifier_call = udp_tunnel_nic_netdevice_event,
};

static int __init udp_tunnel_nic_init_module(void)
{
        int err;

        udp_tunnel_nic_workqueue = alloc_workqueue("udp_tunnel_nic", 0, 0);
        if (!udp_tunnel_nic_workqueue)
                return -ENOMEM;

        rtnl_lock();
        udp_tunnel_nic_ops = &__udp_tunnel_nic_ops;
        rtnl_unlock();

        err = register_netdevice_notifier(&udp_tunnel_nic_notifier_block);
        if (err)
                goto err_unset_ops;

        return 0;

err_unset_ops:
        rtnl_lock();
        udp_tunnel_nic_ops = NULL;
        rtnl_unlock();
        destroy_workqueue(udp_tunnel_nic_workqueue);
        return err;
}
late_initcall(udp_tunnel_nic_init_module);

static void __exit udp_tunnel_nic_cleanup_module(void)
{
        unregister_netdevice_notifier(&udp_tunnel_nic_notifier_block);

        rtnl_lock();
        udp_tunnel_nic_ops = NULL;
        rtnl_unlock();

        destroy_workqueue(udp_tunnel_nic_workqueue);
}
module_exit(udp_tunnel_nic_cleanup_module);

MODULE_LICENSE("GPL");