// SPDX-License-Identifier: GPL-2.0-or-later
/* drivers/net/ifb.c:

	The purpose of this driver is to provide a device that allows
	for sharing of resources:

	1) qdiscs/policies that are per device as opposed to system wide.
	ifb allows for a device which can be redirected to thus providing
	an impression of sharing.

	2) Allows for queueing incoming traffic for shaping instead of
	dropping.

	The original concept is based on what is known as the IMQ
	driver initially written by Martin Devera, later rewritten
	by Patrick McHardy and then maintained by Andre Correa.

	You need the tc action mirror or redirect to feed this device
	packets.

	Authors:	Jamal Hadi Salim (2005)
*/
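/* Illustrative usage sketch (not part of the driver; the device and
 * interface names eth0/ifb0 are examples, not requirements): redirect
 * the ingress traffic of a real interface to an ifb device so that an
 * egress qdisc attached to the ifb device shapes it.
 *
 *	ip link add ifb0 type ifb
 *	ip link set ifb0 up
 *	tc qdisc add dev eth0 handle ffff: ingress
 *	tc filter add dev eth0 parent ffff: matchall \
 *		action mirred egress redirect dev ifb0
 *	tc qdisc add dev ifb0 root sfq
 */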
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/netfilter_netdev.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>

#define TX_Q_LIMIT	32
/* Per tx-queue state, cacheline aligned so that queues served by
 * different CPUs do not share cache lines.
 */
struct ifb_q_private {
	struct net_device	*dev;
	struct tasklet_struct	ifb_tasklet;
	int			tasklet_pending;
	int			txqnum;
	struct sk_buff_head	rq;
	u64			rx_packets;
	u64			rx_bytes;
	struct u64_stats_sync	rsync;

	struct u64_stats_sync	tsync;
	u64			tx_packets;
	u64			tx_bytes;
	struct sk_buff_head	tq;
} ____cacheline_aligned_in_smp;
struct ifb_dev_private {
	struct ifb_q_private *tx_private;
};
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev);
static int ifb_open(struct net_device *dev);
static int ifb_close(struct net_device *dev);
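
/* Reinjection worker: ifb_xmit() queues redirected packets on rq; this
 * tasklet splices them onto tq under the tx lock, then reinjects each
 * packet into the stack, either via dev_queue_xmit() toward the
 * original device's egress path or via netif_receive_skb() back into
 * the ingress path it was redirected from.
 */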
static void ifb_ri_tasklet(struct tasklet_struct *t)
{
	struct ifb_q_private *txp = from_tasklet(txp, t, ifb_tasklet);
	struct netdev_queue *txq;
	struct sk_buff *skb;

	txq = netdev_get_tx_queue(txp->dev, txp->txqnum);
	skb = skb_peek(&txp->tq);
	if (!skb) {
		if (!__netif_tx_trylock(txq))
			goto resched;
		skb_queue_splice_tail_init(&txp->rq, &txp->tq);
		__netif_tx_unlock(txq);
	}

	while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
		/* Skip tc and netfilter to prevent redirection loop. */
		skb->redirected = 0;
#ifdef CONFIG_NET_CLS_ACT
		skb->tc_skip_classify = 1;
#endif
		nf_skip_egress(skb, true);

		u64_stats_update_begin(&txp->tsync);
		txp->tx_packets++;
		txp->tx_bytes += skb->len;
		u64_stats_update_end(&txp->tsync);

		rcu_read_lock();
		skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif);
		if (!skb->dev) {
			rcu_read_unlock();
			dev_kfree_skb(skb);
			txp->dev->stats.tx_dropped++;
			if (skb_queue_len(&txp->tq) != 0)
				goto resched;
			break;
		}
		rcu_read_unlock();
		skb->skb_iif = txp->dev->ifindex;

		if (!skb->from_ingress) {
			dev_queue_xmit(skb);
		} else {
			skb_pull_rcsum(skb, skb->mac_len);
			netif_receive_skb(skb);
		}
	}

	if (__netif_tx_trylock(txq)) {
		skb = skb_peek(&txp->rq);
		if (!skb) {
			txp->tasklet_pending = 0;
			if (netif_tx_queue_stopped(txq))
				netif_tx_wake_queue(txq);
		} else {
			__netif_tx_unlock(txq);
			goto resched;
		}
		__netif_tx_unlock(txq);
	} else {
resched:
		txp->tasklet_pending = 1;
		tasklet_schedule(&txp->ifb_tasklet);
	}
}
static void ifb_stats64(struct net_device *dev,
			struct rtnl_link_stats64 *stats)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp = dp->tx_private;
	unsigned int start;
	u64 packets, bytes;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
		do {
			start = u64_stats_fetch_begin_irq(&txp->rsync);
			packets = txp->rx_packets;
			bytes = txp->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&txp->rsync, start));
		stats->rx_packets += packets;
		stats->rx_bytes += bytes;

		do {
			start = u64_stats_fetch_begin_irq(&txp->tsync);
			packets = txp->tx_packets;
			bytes = txp->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&txp->tsync, start));
		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
	}
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;
}
static int ifb_dev_init(struct net_device *dev)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp;
	int i;

	txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL);
	if (!txp)
		return -ENOMEM;
	dp->tx_private = txp;
	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
		txp->dev = dev;
		txp->txqnum = i;
		__skb_queue_head_init(&txp->rq);
		__skb_queue_head_init(&txp->tq);
		u64_stats_init(&txp->rsync);
		u64_stats_init(&txp->tsync);
		tasklet_setup(&txp->ifb_tasklet, ifb_ri_tasklet);
		netif_tx_start_queue(netdev_get_tx_queue(dev, i));
	}
	return 0;
}
static const struct net_device_ops ifb_netdev_ops = {
	.ndo_open	= ifb_open,
	.ndo_stop	= ifb_close,
	.ndo_get_stats64 = ifb_stats64,
	.ndo_start_xmit	= ifb_xmit,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_init	= ifb_dev_init,
};
#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST	| \
		      NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL	| \
		      NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX		| \
		      NETIF_F_HW_VLAN_STAG_TX)
static void ifb_dev_free(struct net_device *dev)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp = dp->tx_private;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
		tasklet_kill(&txp->ifb_tasklet);
		__skb_queue_purge(&txp->rq);
		__skb_queue_purge(&txp->tq);
	}
	kfree(dp->tx_private);
}
static void ifb_setup(struct net_device *dev)
{
	/* Initialize the device structure. */
	dev->netdev_ops = &ifb_netdev_ops;

	/* Fill in device structure with ethernet-generic values. */
	ether_setup(dev);
	dev->tx_queue_len = TX_Q_LIMIT;

	dev->features |= IFB_FEATURES;
	dev->hw_features |= dev->features;
	dev->hw_enc_features |= dev->features;
	dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX |
					       NETIF_F_HW_VLAN_STAG_TX);

	dev->flags |= IFF_NOARP;
	dev->flags &= ~IFF_MULTICAST;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	netif_keep_dst(dev);
	eth_hw_addr_random(dev);
	dev->needs_free_netdev = true;
	dev->priv_destructor = ifb_dev_free;

	dev->min_mtu = 0;
	dev->max_mtu = 0;
}
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb);

	u64_stats_update_begin(&txp->rsync);
	txp->rx_packets++;
	txp->rx_bytes += skb->len;
	u64_stats_update_end(&txp->rsync);

	/* Only packets redirected here (e.g. by tc mirred) are accepted. */
	if (!skb->redirected || !skb->skb_iif) {
		dev_kfree_skb(skb);
		dev->stats.rx_dropped++;
		return NETDEV_TX_OK;
	}

	if (skb_queue_len(&txp->rq) >= dev->tx_queue_len)
		netif_tx_stop_queue(netdev_get_tx_queue(dev, txp->txqnum));

	__skb_queue_tail(&txp->rq, skb);
	if (!txp->tasklet_pending) {
		txp->tasklet_pending = 1;
		tasklet_schedule(&txp->ifb_tasklet);
	}
	return NETDEV_TX_OK;
}
static int ifb_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}
static int ifb_open(struct net_device *dev)
{
	netif_tx_start_all_queues(dev);
	return 0;
}
static int ifb_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}
static struct rtnl_link_ops ifb_link_ops __read_mostly = {
	.kind		= "ifb",
	.priv_size	= sizeof(struct ifb_dev_private),
	.setup		= ifb_setup,
	.validate	= ifb_validate,
};
/* Number of ifb devices to be set up by this module.
 * Note that these legacy devices have one queue.
 * Prefer something like: ip link add ifb10 numtxqueues 8 type ifb
 */
static int numifbs = 2;
module_param(numifbs, int, 0);
MODULE_PARM_DESC(numifbs, "Number of ifb devices");
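
/* For example (illustrative): "modprobe ifb numifbs=0" loads the module
 * without creating any legacy single-queue devices, so that multiqueue
 * devices can instead be created with ip link as noted above.
 */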
static int __init ifb_init_one(int index)
{
	struct net_device *dev_ifb;
	int err;

	dev_ifb = alloc_netdev(sizeof(struct ifb_dev_private), "ifb%d",
			       NET_NAME_UNKNOWN, ifb_setup);
	if (!dev_ifb)
		return -ENOMEM;

	dev_ifb->rtnl_link_ops = &ifb_link_ops;
	err = register_netdevice(dev_ifb);
	if (err < 0)
		goto err;

	return 0;

err:
	free_netdev(dev_ifb);
	return err;
}
static int __init ifb_init_module(void)
{
	int i, err;

	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = __rtnl_link_register(&ifb_link_ops);
	if (err < 0)
		goto out;

	for (i = 0; i < numifbs && !err; i++) {
		err = ifb_init_one(i);
		cond_resched();
	}
	if (err)
		__rtnl_link_unregister(&ifb_link_ops);

out:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;
}
static void __exit ifb_cleanup_module(void)
{
	rtnl_link_unregister(&ifb_link_ops);
}
module_init(ifb_init_module);
module_exit(ifb_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamal Hadi Salim");
MODULE_ALIAS_RTNL_LINK("ifb");