/*
 *  drivers/net/veth.c
 *
 *  Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
 */

#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>

#include <net/rtnetlink.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/xdp.h>
#include <linux/veth.h>
#include <linux/module.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <linux/bpf_trace.h>
#include <linux/net_tstamp.h>

#define DRV_NAME	"veth"
#define DRV_VERSION	"1.0"

#define VETH_XDP_FLAG		BIT(0)
#define VETH_RING_SIZE		256
#define VETH_XDP_HEADROOM	(XDP_PACKET_HEADROOM + NET_IP_ALIGN)

/* Separating two types of XDP xmit */
#define VETH_XDP_TX		BIT(0)
#define VETH_XDP_REDIR		BIT(1)

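/* Per-queue RX state. Each RX queue has its own NAPI context and
 * ptr_ring, so the XDP fast path runs without cross-queue locking.
 */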
struct veth_rq {
	struct napi_struct	xdp_napi;
	struct net_device	*dev;
	struct bpf_prog __rcu	*xdp_prog;
	struct xdp_mem_info	xdp_mem;
	bool			rx_notify_masked;
	struct ptr_ring		xdp_ring;
	struct xdp_rxq_info	xdp_rxq;
};

struct veth_priv {
	struct net_device __rcu	*peer;
	atomic64_t		dropped;
	struct bpf_prog		*_xdp_prog;
	struct veth_rq		*rq;
	unsigned int		requested_headroom;
};

/*
 * ethtool interface
 */

static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "peer_ifindex" },
};

static int veth_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed		= SPEED_10000;
	cmd->base.duplex	= DUPLEX_FULL;
	cmd->base.port		= PORT_TP;
	cmd->base.autoneg	= AUTONEG_DISABLE;
	return 0;
}

static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	}
}

static int veth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ethtool_stats_keys);
	default:
		return -EOPNOTSUPP;
	}
}

static void veth_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	data[0] = peer ? peer->ifindex : 0;
}

static int veth_get_ts_info(struct net_device *dev,
			    struct ethtool_ts_info *info)
{
	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;

	return 0;
}

static const struct ethtool_ops veth_ethtool_ops = {
	.get_drvinfo		= veth_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= veth_get_strings,
	.get_sset_count		= veth_get_sset_count,
	.get_ethtool_stats	= veth_get_ethtool_stats,
	.get_link_ksettings	= veth_get_link_ksettings,
	.get_ts_info		= veth_get_ts_info,
};

/* general routines */

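/* sk_buffs and xdp_frames travel through the same per-queue ptr_ring.
 * The low bit of each queued pointer tags xdp_frames (VETH_XDP_FLAG),
 * letting the consumer tell the two apart; this relies on the pointers
 * being at least two-byte aligned.
 */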
static bool veth_is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VETH_XDP_FLAG;
}

static void *veth_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
}

static void *veth_xdp_to_ptr(void *ptr)
{
	return (void *)((unsigned long)ptr | VETH_XDP_FLAG);
}

static void veth_ptr_free(void *ptr)
{
	if (veth_is_xdp_frame(ptr))
		xdp_return_frame(veth_ptr_to_xdp(ptr));
	else
		kfree_skb(ptr);
}

static void __veth_xdp_flush(struct veth_rq *rq)
{
	/* Write ptr_ring before reading rx_notify_masked */
	smp_mb();
	if (!rq->rx_notify_masked) {
		rq->rx_notify_masked = true;
		napi_schedule(&rq->xdp_napi);
	}
}

static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
{
	if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	return NET_RX_SUCCESS;
}

static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
			    struct veth_rq *rq, bool xdp)
{
	return __dev_forward_skb(dev, skb) ?: xdp ?
		veth_xdp_rx(rq, skb) :
		netif_rx(skb);
}

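/* Transmit path. A packet sent on one device of the pair is delivered
 * straight to its peer: either queued on the peer's ptr_ring for NAPI
 * processing (when the peer has an XDP program attached) or handed to
 * netif_rx().
 */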
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct veth_rq *rq = NULL;
	struct net_device *rcv;
	int length = skb->len;
	bool rcv_xdp = false;
	int rxq;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv)) {
		kfree_skb(skb);
		goto drop;
	}

	rcv_priv = netdev_priv(rcv);
	rxq = skb_get_queue_mapping(skb);
	if (rxq < rcv->real_num_rx_queues) {
		rq = &rcv_priv->rq[rxq];
		rcv_xdp = rcu_access_pointer(rq->xdp_prog);
		if (rcv_xdp)
			skb_record_rx_queue(skb, rxq);
	}

	skb_tx_timestamp(skb);
	if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) {
		struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats);

		u64_stats_update_begin(&stats->syncp);
		stats->bytes += length;
		stats->packets++;
		u64_stats_update_end(&stats->syncp);
	} else {
drop:
		atomic64_inc(&priv->dropped);
	}

	if (rcv_xdp)
		__veth_xdp_flush(rq);

	rcu_read_unlock();

	return NETDEV_TX_OK;
}

static u64 veth_stats_one(struct pcpu_lstats *result, struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int cpu;

	result->packets = 0;
	result->bytes = 0;
	for_each_possible_cpu(cpu) {
		struct pcpu_lstats *stats = per_cpu_ptr(dev->lstats, cpu);
		u64 packets, bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
		result->packets += packets;
		result->bytes += bytes;
	}
	return atomic64_read(&priv->dropped);
}

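/* One side's TX counters are by definition the peer's RX counters, so
 * the rx_* totals are read from the peer device under RCU.
 */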
static void veth_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *tot)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	struct pcpu_lstats one;

	tot->tx_dropped = veth_stats_one(&one, dev);
	tot->tx_bytes = one.bytes;
	tot->tx_packets = one.packets;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (peer) {
		tot->rx_dropped = veth_stats_one(&one, peer);
		tot->rx_bytes = one.bytes;
		tot->rx_packets = one.packets;
	}
	rcu_read_unlock();
}

/* fake multicast ability */
static void veth_set_multicast_list(struct net_device *dev)
{
}

static struct sk_buff *veth_build_skb(void *head, int headroom, int len,
				      int buflen)
{
	struct sk_buff *skb;

	if (!buflen)
		buflen = SKB_DATA_ALIGN(headroom + len) +
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	skb = build_skb(head, buflen);
	if (!skb)
		return NULL;

	skb_reserve(skb, headroom);
	skb_put(skb, len);
	return skb;
}

static int veth_select_rxq(struct net_device *dev)
{
	return smp_processor_id() % dev->real_num_rx_queues;
}

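/* ndo_xdp_xmit handler, used as an XDP_REDIRECT target. Frames are
 * pushed onto the peer's ptr_ring under the producer lock; this only
 * works while the peer has an XDP program loaded, since loading the
 * program is what initializes the ring.
 */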
static int veth_xdp_xmit(struct net_device *dev, int n,
			 struct xdp_frame **frames, u32 flags)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct net_device *rcv;
	unsigned int max_len;
	struct veth_rq *rq;
	int i, drops = 0;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv))
		return -ENXIO;

	rcv_priv = netdev_priv(rcv);
	rq = &rcv_priv->rq[veth_select_rxq(rcv)];
	/* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive
	 * side. This means an XDP program is loaded on the peer and the peer
	 * device is up.
	 */
	if (!rcu_access_pointer(rq->xdp_prog))
		return -ENXIO;

	max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;

	spin_lock(&rq->xdp_ring.producer_lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *frame = frames[i];
		void *ptr = veth_xdp_to_ptr(frame);

		if (unlikely(frame->len > max_len ||
			     __ptr_ring_produce(&rq->xdp_ring, ptr))) {
			xdp_return_frame_rx_napi(frame);
			drops++;
		}
	}
	spin_unlock(&rq->xdp_ring.producer_lock);

	if (flags & XDP_XMIT_FLUSH)
		__veth_xdp_flush(rq);

	return n - drops;
}

static void veth_xdp_flush(struct net_device *dev)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct net_device *rcv;
	struct veth_rq *rq;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv))
		goto out;

	rcv_priv = netdev_priv(rcv);
	rq = &rcv_priv->rq[veth_select_rxq(rcv)];
	/* xdp_ring is initialized on receive side? */
	if (unlikely(!rcu_access_pointer(rq->xdp_prog)))
		goto out;

	__veth_xdp_flush(rq);
out:
	rcu_read_unlock();
}

static int veth_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
{
	struct xdp_frame *frame = convert_to_xdp_frame(xdp);

	if (unlikely(!frame))
		return -EOVERFLOW;

	return veth_xdp_xmit(dev, 1, &frame, 0);
}

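/* RX path for tagged ring entries that carry an xdp_frame (frames
 * transmitted or redirected to us as XDP). Runs the local XDP program
 * and either re-injects the frame (TX/REDIRECT) or builds an skb for
 * the stack.
 */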
static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
					struct xdp_frame *frame,
					unsigned int *xdp_xmit)
{
	void *hard_start = frame->data - frame->headroom;
	void *head = hard_start - sizeof(struct xdp_frame);
	int len = frame->len, delta = 0;
	struct xdp_frame orig_frame;
	struct bpf_prog *xdp_prog;
	unsigned int headroom;
	struct sk_buff *skb;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (likely(xdp_prog)) {
		struct xdp_buff xdp;
		u32 act;

		xdp.data_hard_start = hard_start;
		xdp.data = frame->data;
		xdp.data_end = frame->data + frame->len;
		xdp.data_meta = frame->data - frame->metasize;
		xdp.rxq = &rq->xdp_rxq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);

		switch (act) {
		case XDP_PASS:
			delta = frame->data - xdp.data;
			len = xdp.data_end - xdp.data;
			break;
		case XDP_TX:
			orig_frame = *frame;
			xdp.data_hard_start = head;
			xdp.rxq->mem = frame->mem;
			if (unlikely(veth_xdp_tx(rq->dev, &xdp) < 0)) {
				trace_xdp_exception(rq->dev, xdp_prog, act);
				frame = &orig_frame;
				goto err_xdp;
			}
			*xdp_xmit |= VETH_XDP_TX;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			orig_frame = *frame;
			xdp.data_hard_start = head;
			xdp.rxq->mem = frame->mem;
			if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
				frame = &orig_frame;
				goto err_xdp;
			}
			*xdp_xmit |= VETH_XDP_REDIR;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
		case XDP_ABORTED:
			trace_xdp_exception(rq->dev, xdp_prog, act);
		case XDP_DROP:
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	headroom = sizeof(struct xdp_frame) + frame->headroom - delta;
	skb = veth_build_skb(head, headroom, len, 0);
	if (!skb) {
		xdp_return_frame(frame);
		goto err;
	}

	xdp_scrub_frame(frame);
	skb->protocol = eth_type_trans(skb, rq->dev);
err:
	return skb;
err_xdp:
	rcu_read_unlock();
	xdp_return_frame(frame);
xdp_xmit:
	return NULL;
}

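/* RX path for plain sk_buffs queued by veth_xmit(). The skb is
 * reallocated into a freshly allocated page whenever it is shared,
 * nonlinear or short on headroom, so the XDP program always sees a
 * writable linear buffer with XDP_PACKET_HEADROOM available.
 */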
static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
					unsigned int *xdp_xmit)
{
	u32 pktlen, headroom, act, metalen;
	void *orig_data, *orig_data_end;
	struct bpf_prog *xdp_prog;
	int mac_len, delta, off;
	struct xdp_buff xdp;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (unlikely(!xdp_prog)) {
		rcu_read_unlock();
		goto out;
	}

	mac_len = skb->data - skb_mac_header(skb);
	pktlen = skb->len + mac_len;
	headroom = skb_headroom(skb) - mac_len;

	if (skb_shared(skb) || skb_head_is_locked(skb) ||
	    skb_is_nonlinear(skb) || headroom < XDP_PACKET_HEADROOM) {
		struct sk_buff *nskb;
		int size, head_off;
		void *head, *start;
		struct page *page;

		size = SKB_DATA_ALIGN(VETH_XDP_HEADROOM + pktlen) +
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		if (size > PAGE_SIZE)
			goto drop;

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page)
			goto drop;

		head = page_address(page);
		start = head + VETH_XDP_HEADROOM;
		if (skb_copy_bits(skb, -mac_len, start, pktlen)) {
			page_frag_free(head);
			goto drop;
		}

		nskb = veth_build_skb(head,
				      VETH_XDP_HEADROOM + mac_len, skb->len,
				      PAGE_SIZE);
		if (!nskb) {
			page_frag_free(head);
			goto drop;
		}

		skb_copy_header(nskb, skb);
		head_off = skb_headroom(nskb) - skb_headroom(skb);
		skb_headers_offset_update(nskb, head_off);
		consume_skb(skb);
		skb = nskb;
	}

	xdp.data_hard_start = skb->head;
	xdp.data = skb_mac_header(skb);
	xdp.data_end = xdp.data + pktlen;
	xdp.data_meta = xdp.data;
	xdp.rxq = &rq->xdp_rxq;
	orig_data = xdp.data;
	orig_data_end = xdp.data_end;

	act = bpf_prog_run_xdp(xdp_prog, &xdp);

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		get_page(virt_to_page(xdp.data));
		consume_skb(skb);
		xdp.rxq->mem = rq->xdp_mem;
		if (unlikely(veth_xdp_tx(rq->dev, &xdp) < 0)) {
			trace_xdp_exception(rq->dev, xdp_prog, act);
			goto err_xdp;
		}
		*xdp_xmit |= VETH_XDP_TX;
		rcu_read_unlock();
		goto xdp_xmit;
	case XDP_REDIRECT:
		get_page(virt_to_page(xdp.data));
		consume_skb(skb);
		xdp.rxq->mem = rq->xdp_mem;
		if (xdp_do_redirect(rq->dev, &xdp, xdp_prog))
			goto err_xdp;
		*xdp_xmit |= VETH_XDP_REDIR;
		rcu_read_unlock();
		goto xdp_xmit;
	default:
		bpf_warn_invalid_xdp_action(act);
	case XDP_ABORTED:
		trace_xdp_exception(rq->dev, xdp_prog, act);
	case XDP_DROP:
		goto drop;
	}
	rcu_read_unlock();

	delta = orig_data - xdp.data;
	off = mac_len + delta;
	if (off > 0)
		__skb_push(skb, off);
	else if (off < 0)
		__skb_pull(skb, -off);
	skb->mac_header -= delta;
	off = xdp.data_end - orig_data_end;
	if (off != 0)
		__skb_put(skb, off);
	skb->protocol = eth_type_trans(skb, rq->dev);

	metalen = xdp.data - xdp.data_meta;
	if (metalen)
		skb_metadata_set(skb, metalen);
out:
	return skb;
drop:
	rcu_read_unlock();
	kfree_skb(skb);
	return NULL;
err_xdp:
	rcu_read_unlock();
	page_frag_free(xdp.data);
xdp_xmit:
	return NULL;
}

static int veth_xdp_rcv(struct veth_rq *rq, int budget, unsigned int *xdp_xmit)
{
	int i, done = 0;

	for (i = 0; i < budget; i++) {
		void *ptr = __ptr_ring_consume(&rq->xdp_ring);
		struct sk_buff *skb;

		if (!ptr)
			break;

		if (veth_is_xdp_frame(ptr))
			skb = veth_xdp_rcv_one(rq, veth_ptr_to_xdp(ptr),
					       xdp_xmit);
		else
			skb = veth_xdp_rcv_skb(rq, ptr, xdp_xmit);

		if (skb)
			napi_gro_receive(&rq->xdp_napi, skb);

		done++;
	}

	return done;
}

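/* NAPI handler. rx_notify_masked suppresses redundant napi_schedule()
 * calls from producers while a poll is in flight; after
 * napi_complete_done() the ring is re-checked to close the race with a
 * producer that enqueued just before the flag was cleared.
 */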
static int veth_poll(struct napi_struct *napi, int budget)
{
	struct veth_rq *rq =
		container_of(napi, struct veth_rq, xdp_napi);
	unsigned int xdp_xmit = 0;
	int done;

	xdp_set_return_frame_no_direct();
	done = veth_xdp_rcv(rq, budget, &xdp_xmit);

	if (done < budget && napi_complete_done(napi, done)) {
		/* Write rx_notify_masked before reading ptr_ring */
		smp_store_mb(rq->rx_notify_masked, false);
		if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
			rq->rx_notify_masked = true;
			napi_schedule(&rq->xdp_napi);
		}
	}

	if (xdp_xmit & VETH_XDP_TX)
		veth_xdp_flush(rq->dev);
	if (xdp_xmit & VETH_XDP_REDIR)
		xdp_do_flush_map();
	xdp_clear_return_frame_no_direct();

	return done;
}

static int veth_napi_add(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
		if (err)
			goto err_xdp_ring;
	}

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
		napi_enable(&rq->xdp_napi);
	}

	return 0;
err_xdp_ring:
	for (i--; i >= 0; i--)
		ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);

	return err;
}

static void veth_napi_del(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		napi_disable(&rq->xdp_napi);
		napi_hash_del(&rq->xdp_napi);
	}
	synchronize_net();

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		netif_napi_del(&rq->xdp_napi);
		rq->rx_notify_masked = false;
		ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
	}
}

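/* Register per-queue xdp_rxq_info, start NAPI, then publish the
 * program to every queue. Called both when a program is attached and
 * when the device is brought up with a program already set.
 */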
static int veth_enable_xdp(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
		for (i = 0; i < dev->real_num_rx_queues; i++) {
			struct veth_rq *rq = &priv->rq[i];

			err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i);
			if (err < 0)
				goto err_rxq_reg;

			err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
							 MEM_TYPE_PAGE_SHARED,
							 NULL);
			if (err < 0)
				goto err_reg_mem;

			/* Save original mem info as it can be overwritten */
			rq->xdp_mem = rq->xdp_rxq.mem;
		}

		err = veth_napi_add(dev);
		if (err)
			goto err_rxq_reg;
	}

	for (i = 0; i < dev->real_num_rx_queues; i++)
		rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog);

	return 0;
err_reg_mem:
	xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
err_rxq_reg:
	for (i--; i >= 0; i--)
		xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);

	return err;
}

static void veth_disable_xdp(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < dev->real_num_rx_queues; i++)
		rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
	veth_napi_del(dev);
	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		rq->xdp_rxq.mem = rq->xdp_mem;
		xdp_rxq_info_unreg(&rq->xdp_rxq);
	}
}

static int veth_open(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);
	int err;

	if (!peer)
		return -ENOTCONN;

	if (priv->_xdp_prog) {
		err = veth_enable_xdp(dev);
		if (err)
			return err;
	}

	if (peer->flags & IFF_UP) {
		netif_carrier_on(dev);
		netif_carrier_on(peer);
	}

	return 0;
}

static int veth_close(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	netif_carrier_off(dev);
	if (peer)
		netif_carrier_off(peer);

	if (priv->_xdp_prog)
		veth_disable_xdp(dev);

	return 0;
}

static int is_valid_veth_mtu(int mtu)
{
	return mtu >= ETH_MIN_MTU && mtu <= ETH_MAX_MTU;
}

static int veth_alloc_queues(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL);
	if (!priv->rq)
		return -ENOMEM;

	for (i = 0; i < dev->num_rx_queues; i++)
		priv->rq[i].dev = dev;

	return 0;
}

static void veth_free_queues(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);

	kfree(priv->rq);
}

static int veth_dev_init(struct net_device *dev)
{
	int err;

	dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
	if (!dev->lstats)
		return -ENOMEM;

	err = veth_alloc_queues(dev);
	if (err) {
		free_percpu(dev->lstats);
		return err;
	}

	return 0;
}

static void veth_dev_free(struct net_device *dev)
{
	veth_free_queues(dev);
	free_percpu(dev->lstats);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void veth_poll_controller(struct net_device *dev)
{
	/* veth only receives frames when its peer sends one.
	 * Since it has nothing to do with disabling irqs, we are guaranteed
	 * never to have pending data when we poll for it, so
	 * there is nothing to do here.
	 *
	 * We need this though so netpoll recognizes us as an interface that
	 * supports polling, which enables bridge devices in virt setups to
	 * still use netconsole.
	 */
}
#endif	/* CONFIG_NET_POLL_CONTROLLER */

static int veth_get_iflink(const struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	int iflink;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	iflink = peer ? peer->ifindex : 0;
	rcu_read_unlock();

	return iflink;
}

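/* While the peer runs XDP, frames it receives must fit in one page
 * (see max_mtu in veth_xdp_set()), so software GSO is masked here to
 * avoid handing it oversized, nonlinear skbs.
 */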
static netdev_features_t veth_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;

	peer = rtnl_dereference(priv->peer);
	if (peer) {
		struct veth_priv *peer_priv = netdev_priv(peer);

		if (peer_priv->_xdp_prog)
			features &= ~NETIF_F_GSO_SOFTWARE;
	}

	return features;
}

static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
{
	struct veth_priv *peer_priv, *priv = netdev_priv(dev);
	struct net_device *peer;

	if (new_hr < 0)
		new_hr = 0;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (unlikely(!peer))
		goto out;

	peer_priv = netdev_priv(peer);
	priv->requested_headroom = new_hr;
	new_hr = max(priv->requested_headroom, peer_priv->requested_headroom);
	dev->needed_headroom = new_hr;
	peer->needed_headroom = new_hr;

out:
	rcu_read_unlock();
}

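/* Attach or detach an XDP program under RTNL. With a program attached,
 * every frame from the peer must fit in a single page including
 * headroom and shared info, and each peer TX queue needs a matching RX
 * queue on this device.
 */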
static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
			struct netlink_ext_ack *extack)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct bpf_prog *old_prog;
	struct net_device *peer;
	unsigned int max_mtu;
	int err;

	old_prog = priv->_xdp_prog;
	priv->_xdp_prog = prog;
	peer = rtnl_dereference(priv->peer);

	if (prog) {
		if (!peer) {
			NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached");
			err = -ENOTCONN;
			goto err;
		}

		max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
			  peer->hard_header_len -
			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		if (peer->mtu > max_mtu) {
			NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
			err = -ERANGE;
			goto err;
		}

		if (dev->real_num_rx_queues < peer->real_num_tx_queues) {
			NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues");
			err = -ENOSPC;
			goto err;
		}

		if (dev->flags & IFF_UP) {
			err = veth_enable_xdp(dev);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed");
				goto err;
			}
		}

		if (!old_prog) {
			peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
			peer->max_mtu = max_mtu;
		}
	}

	if (old_prog) {
		if (!prog) {
			if (dev->flags & IFF_UP)
				veth_disable_xdp(dev);

			if (peer) {
				peer->hw_features |= NETIF_F_GSO_SOFTWARE;
				peer->max_mtu = ETH_MAX_MTU;
			}
		}
		bpf_prog_put(old_prog);
	}

	if ((!!old_prog ^ !!prog) && peer)
		netdev_update_features(peer);

	return 0;
err:
	priv->_xdp_prog = old_prog;

	return err;
}

static u32 veth_xdp_query(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	const struct bpf_prog *xdp_prog;

	xdp_prog = priv->_xdp_prog;
	if (xdp_prog)
		return xdp_prog->aux->id;

	return 0;
}

static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return veth_xdp_set(dev, xdp->prog, xdp->extack);
	case XDP_QUERY_PROG:
		xdp->prog_id = veth_xdp_query(dev);
		return 0;
	default:
		return -EINVAL;
	}
}

static const struct net_device_ops veth_netdev_ops = {
	.ndo_init		= veth_dev_init,
	.ndo_open		= veth_open,
	.ndo_stop		= veth_close,
	.ndo_start_xmit		= veth_xmit,
	.ndo_get_stats64	= veth_get_stats64,
	.ndo_set_rx_mode	= veth_set_multicast_list,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= veth_poll_controller,
#endif
	.ndo_get_iflink		= veth_get_iflink,
	.ndo_fix_features	= veth_fix_features,
	.ndo_features_check	= passthru_features_check,
	.ndo_set_rx_headroom	= veth_set_rx_headroom,
	.ndo_bpf		= veth_xdp,
	.ndo_xdp_xmit		= veth_xdp_xmit,
};

#define VETH_FEATURES	(NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
			 NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \
			 NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
			 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
			 NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX)

static void veth_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->priv_flags |= IFF_PHONY_HEADROOM;

	dev->netdev_ops = &veth_netdev_ops;
	dev->ethtool_ops = &veth_ethtool_ops;
	dev->features |= NETIF_F_LLTX;
	dev->features |= VETH_FEATURES;
	dev->vlan_features = dev->features &
			     ~(NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_STAG_TX |
			       NETIF_F_HW_VLAN_CTAG_RX |
			       NETIF_F_HW_VLAN_STAG_RX);
	dev->needs_free_netdev = true;
	dev->priv_destructor = veth_dev_free;
	dev->max_mtu = ETH_MAX_MTU;

	dev->hw_features = VETH_FEATURES;
	dev->hw_enc_features = VETH_FEATURES;
	dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
}

/*
 * netlink interface
 */

static int veth_validate(struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	if (tb[IFLA_MTU]) {
		if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
			return -EINVAL;
	}
	return 0;
}

static struct rtnl_link_ops veth_link_ops;

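/* Creating a veth link registers two netdevices: the peer is created
 * first (from the VETH_INFO_PEER attributes when given, otherwise with
 * defaults), then the device itself, and finally the two priv->peer
 * pointers are tied together.
 */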
static int veth_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	int err;
	struct net_device *peer;
	struct veth_priv *priv;
	char ifname[IFNAMSIZ];
	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
	unsigned char name_assign_type;
	struct ifinfomsg *ifmp;
	struct net *net;

	/*
	 * create and register peer first
	 */
	if (data != NULL && data[VETH_INFO_PEER] != NULL) {
		struct nlattr *nla_peer;

		nla_peer = data[VETH_INFO_PEER];
		ifmp = nla_data(nla_peer);
		err = rtnl_nla_parse_ifla(peer_tb,
					  nla_data(nla_peer) + sizeof(struct ifinfomsg),
					  nla_len(nla_peer) - sizeof(struct ifinfomsg),
					  NULL);
		if (err < 0)
			return err;

		err = veth_validate(peer_tb, NULL, extack);
		if (err < 0)
			return err;

		tbp = peer_tb;
	} else {
		ifmp = NULL;
		tbp = tb;
	}

	if (ifmp && tbp[IFLA_IFNAME]) {
		nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
		name_assign_type = NET_NAME_USER;
	} else {
		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
		name_assign_type = NET_NAME_ENUM;
	}

	net = rtnl_link_get_net(src_net, tbp);
	if (IS_ERR(net))
		return PTR_ERR(net);

	peer = rtnl_create_link(net, ifname, name_assign_type,
				&veth_link_ops, tbp);
	if (IS_ERR(peer)) {
		put_net(net);
		return PTR_ERR(peer);
	}

	if (!ifmp || !tbp[IFLA_ADDRESS])
		eth_hw_addr_random(peer);

	if (ifmp && (dev->ifindex != 0))
		peer->ifindex = ifmp->ifi_index;

	peer->gso_max_size = dev->gso_max_size;
	peer->gso_max_segs = dev->gso_max_segs;

	err = register_netdevice(peer);
	put_net(net);
	net = NULL;
	if (err < 0)
		goto err_register_peer;

	netif_carrier_off(peer);

	err = rtnl_configure_link(peer, ifmp);
	if (err < 0)
		goto err_configure_peer;

	/*
	 * register dev last
	 *
	 * note that, since we've registered a new device, the dev's name
	 * should be re-allocated
	 */
	if (tb[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(dev);

	if (tb[IFLA_IFNAME])
		nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

	err = register_netdevice(dev);
	if (err < 0)
		goto err_register_dev;

	netif_carrier_off(dev);

	/*
	 * tie the devices together
	 */
	priv = netdev_priv(dev);
	rcu_assign_pointer(priv->peer, peer);

	priv = netdev_priv(peer);
	rcu_assign_pointer(priv->peer, dev);

	return 0;

err_register_dev:
	/* nothing to do */
err_configure_peer:
	unregister_netdevice(peer);
	return err;

err_register_peer:
	free_netdev(peer);
	return err;
}

static void veth_dellink(struct net_device *dev, struct list_head *head)
{
	struct veth_priv *priv;
	struct net_device *peer;

	priv = netdev_priv(dev);
	peer = rtnl_dereference(priv->peer);

	/* Note : dellink() is called from default_device_exit_batch(),
	 * before a rcu_synchronize() point. The devices are guaranteed
	 * not being freed before one RCU grace period.
	 */
	RCU_INIT_POINTER(priv->peer, NULL);
	unregister_netdevice_queue(dev, head);

	if (peer) {
		priv = netdev_priv(peer);
		RCU_INIT_POINTER(priv->peer, NULL);
		unregister_netdevice_queue(peer, head);
	}
}

static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
	[VETH_INFO_PEER] = { .len = sizeof(struct ifinfomsg) },
};

static struct net *veth_get_link_net(const struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	return peer ? dev_net(peer) : dev_net(dev);
}

static struct rtnl_link_ops veth_link_ops = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct veth_priv),
	.setup		= veth_setup,
	.validate	= veth_validate,
	.newlink	= veth_newlink,
	.dellink	= veth_dellink,
	.policy		= veth_policy,
	.maxtype	= VETH_INFO_MAX,
	.get_link_net	= veth_get_link_net,
};

/*
 * init/fini
 */

static __init int veth_init(void)
{
	return rtnl_link_register(&veth_link_ops);
}

static __exit void veth_exit(void)
{
	rtnl_link_unregister(&veth_link_ops);
}

module_init(veth_init);
module_exit(veth_exit);

MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);