1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
7 * Author: Pavel Emelianov <xemul@openvz.org>
8 * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
12 #include <linux/netdevice.h>
13 #include <linux/slab.h>
14 #include <linux/ethtool.h>
15 #include <linux/etherdevice.h>
16 #include <linux/u64_stats_sync.h>
18 #include <net/rtnetlink.h>
22 #include <linux/veth.h>
23 #include <linux/module.h>
24 #include <linux/bpf.h>
25 #include <linux/filter.h>
26 #include <linux/ptr_ring.h>
27 #include <linux/bpf_trace.h>
28 #include <linux/net_tstamp.h>
30 #define DRV_NAME "veth"
31 #define DRV_VERSION "1.0"
33 #define VETH_XDP_FLAG BIT(0)
34 #define VETH_RING_SIZE 256
35 #define VETH_XDP_HEADROOM (XDP_PACKET_HEADROOM + NET_IP_ALIGN)
37 #define VETH_XDP_TX_BULK_SIZE 16
38 #define VETH_XDP_BATCH 16
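/* VETH_XDP_FLAG tags ptr_ring entries that carry an xdp_frame rather than
 * an sk_buff (see veth_xdp_to_ptr() and friends below).  VETH_RING_SIZE is
 * the per-rx-queue xdp_ring capacity, VETH_XDP_HEADROOM is the headroom
 * reserved when an skb has to be copied for XDP processing, and the two
 * bulk sizes bound the XDP_TX bulk queue and the frame-to-skb batch used
 * on the receive path.
 */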
50 u64 peer_tq_xdp_xmit_err;
53 struct veth_rq_stats {
55 struct u64_stats_sync syncp;
59 struct napi_struct xdp_napi;
60 struct napi_struct __rcu *napi; /* points to xdp_napi when the latter is initialized */
61 struct net_device *dev;
62 struct bpf_prog __rcu *xdp_prog;
63 struct xdp_mem_info xdp_mem;
64 struct veth_rq_stats stats;
65 bool rx_notify_masked;
66 struct ptr_ring xdp_ring;
67 struct xdp_rxq_info xdp_rxq;
71 struct net_device __rcu *peer;
73 struct bpf_prog *_xdp_prog;
75 unsigned int requested_headroom;
78 struct veth_xdp_tx_bq {
79 struct xdp_frame *q[VETH_XDP_TX_BULK_SIZE];
87 struct veth_q_stat_desc {
88 char desc[ETH_GSTRING_LEN];
92 #define VETH_RQ_STAT(m) offsetof(struct veth_stats, m)
94 static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
95 { "xdp_packets", VETH_RQ_STAT(xdp_packets) },
96 { "xdp_bytes", VETH_RQ_STAT(xdp_bytes) },
97 { "drops", VETH_RQ_STAT(rx_drops) },
98 { "xdp_redirect", VETH_RQ_STAT(xdp_redirect) },
99 { "xdp_drops", VETH_RQ_STAT(xdp_drops) },
100 { "xdp_tx", VETH_RQ_STAT(xdp_tx) },
101 { "xdp_tx_errors", VETH_RQ_STAT(xdp_tx_err) },
104 #define VETH_RQ_STATS_LEN ARRAY_SIZE(veth_rq_stats_desc)
106 static const struct veth_q_stat_desc veth_tq_stats_desc[] = {
107 { "xdp_xmit", VETH_RQ_STAT(peer_tq_xdp_xmit) },
108 { "xdp_xmit_errors", VETH_RQ_STAT(peer_tq_xdp_xmit_err) },
111 #define VETH_TQ_STATS_LEN ARRAY_SIZE(veth_tq_stats_desc)
114 const char string[ETH_GSTRING_LEN];
115 } ethtool_stats_keys[] = {
119 static int veth_get_link_ksettings(struct net_device *dev,
120 struct ethtool_link_ksettings *cmd)
122 cmd->base.speed = SPEED_10000;
123 cmd->base.duplex = DUPLEX_FULL;
124 cmd->base.port = PORT_TP;
125 cmd->base.autoneg = AUTONEG_DISABLE;
129 static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
131 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
132 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
135 static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
142 memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
143 p += sizeof(ethtool_stats_keys);
144 for (i = 0; i < dev->real_num_rx_queues; i++)
145 for (j = 0; j < VETH_RQ_STATS_LEN; j++)
146 ethtool_sprintf(&p, "rx_queue_%u_%.18s",
147 i, veth_rq_stats_desc[j].desc);
149 for (i = 0; i < dev->real_num_tx_queues; i++)
150 for (j = 0; j < VETH_TQ_STATS_LEN; j++)
151 ethtool_sprintf(&p, "tx_queue_%u_%.18s",
152 i, veth_tq_stats_desc[j].desc);
157 static int veth_get_sset_count(struct net_device *dev, int sset)
161 return ARRAY_SIZE(ethtool_stats_keys) +
162 VETH_RQ_STATS_LEN * dev->real_num_rx_queues +
163 VETH_TQ_STATS_LEN * dev->real_num_tx_queues;
169 static void veth_get_ethtool_stats(struct net_device *dev,
170 struct ethtool_stats *stats, u64 *data)
172 struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
173 struct net_device *peer = rtnl_dereference(priv->peer);
176 data[0] = peer ? peer->ifindex : 0;
178 for (i = 0; i < dev->real_num_rx_queues; i++) {
179 const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
180 const void *stats_base = (void *)&rq_stats->vs;
185 start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
186 for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
187 offset = veth_rq_stats_desc[j].offset;
188 data[idx + j] = *(u64 *)(stats_base + offset);
190 } while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
191 idx += VETH_RQ_STATS_LEN;
197 rcv_priv = netdev_priv(peer);
198 for (i = 0; i < peer->real_num_rx_queues; i++) {
199 const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats;
200 const void *base = (void *)&rq_stats->vs;
201 unsigned int start, tx_idx = idx;
204 tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
206 start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
207 for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
208 offset = veth_tq_stats_desc[j].offset;
209 data[tx_idx + j] += *(u64 *)(base + offset);
211 } while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
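/* Each peer rx queue is mapped onto one of our tx-queue stat slots via
 * (i % real_num_tx_queues); when the peer has more rx queues than we have
 * tx queues, several of its queues accumulate (+=) into the same slot.
 */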
215 static void veth_get_channels(struct net_device *dev,
216 struct ethtool_channels *channels)
218 channels->tx_count = dev->real_num_tx_queues;
219 channels->rx_count = dev->real_num_rx_queues;
220 channels->max_tx = dev->num_tx_queues;
221 channels->max_rx = dev->num_rx_queues;
224 static int veth_set_channels(struct net_device *dev,
225 struct ethtool_channels *ch);
227 static const struct ethtool_ops veth_ethtool_ops = {
228 .get_drvinfo = veth_get_drvinfo,
229 .get_link = ethtool_op_get_link,
230 .get_strings = veth_get_strings,
231 .get_sset_count = veth_get_sset_count,
232 .get_ethtool_stats = veth_get_ethtool_stats,
233 .get_link_ksettings = veth_get_link_ksettings,
234 .get_ts_info = ethtool_op_get_ts_info,
235 .get_channels = veth_get_channels,
236 .set_channels = veth_set_channels,
239 /* general routines */
241 static bool veth_is_xdp_frame(void *ptr)
243 return (unsigned long)ptr & VETH_XDP_FLAG;
246 static struct xdp_frame *veth_ptr_to_xdp(void *ptr)
248 return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
251 static void *veth_xdp_to_ptr(struct xdp_frame *xdp)
253 return (void *)((unsigned long)xdp | VETH_XDP_FLAG);
256 static void veth_ptr_free(void *ptr)
258 if (veth_is_xdp_frame(ptr))
259 xdp_return_frame(veth_ptr_to_xdp(ptr));
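/* Both sk_buffs and xdp_frames travel through the same per-queue ptr_ring;
 * the low pointer bit tells them apart, since both pointers are at least
 * word-aligned.  Illustrative sketch (not part of the driver itself):
 *
 *	void *ptr = veth_xdp_to_ptr(frame);	// sets BIT(0)
 *	if (veth_is_xdp_frame(ptr))		// true for tagged frames
 *		frame = veth_ptr_to_xdp(ptr);	// clears BIT(0) again
 *
 * veth_ptr_free() uses the same test when draining the ring, so every
 * entry is released through the matching (frame vs. skb) free routine.
 */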
264 static void __veth_xdp_flush(struct veth_rq *rq)
266 /* Write ptr_ring before reading rx_notify_masked */
268 if (!rq->rx_notify_masked) {
269 rq->rx_notify_masked = true;
270 napi_schedule(&rq->xdp_napi);
274 static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
276 if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
277 dev_kfree_skb_any(skb);
281 return NET_RX_SUCCESS;
284 static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
285 struct veth_rq *rq, bool xdp)
287 return __dev_forward_skb(dev, skb) ?: xdp ?
288 veth_xdp_rx(rq, skb) :
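/* The GNU "?:" shorthand above returns the __dev_forward_skb() result
 * directly when it is non-zero (NET_RX_DROP); only on success does the
 * skb go on to either the peer's xdp_ring (NAPI path) or, in the branch
 * elided from this excerpt, the regular netif_rx input path.
 */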
292 /* return true if the specified skb has chances of GRO aggregation
293 * Don't strive for accuracy, but try to avoid GRO overhead in the most
294 * common scenarios.
295 * When XDP is enabled, all traffic is considered eligible, as the xmit
296 * device has TSO off.
297 * When TSO is enabled on the xmit device, we are likely interested only
298 * in UDP aggregation, explicitly check for that if the skb is suspected
299 * - the sock_wfree destructor is used by UDP, ICMP and XDP sockets -
300 * to belong to locally generated UDP traffic.
302 static bool veth_skb_is_eligible_for_gro(const struct net_device *dev,
303 const struct net_device *rcv,
304 const struct sk_buff *skb)
306 return !(dev->features & NETIF_F_ALL_TSO) ||
307 (skb->destructor == sock_wfree &&
308 rcv->features & (NETIF_F_GRO_FRAGLIST | NETIF_F_GRO_UDP_FWD));
311 static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
313 struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
314 struct veth_rq *rq = NULL;
315 struct net_device *rcv;
316 int length = skb->len;
317 bool use_napi = false;
321 rcv = rcu_dereference(priv->peer);
322 if (unlikely(!rcv)) {
327 rcv_priv = netdev_priv(rcv);
328 rxq = skb_get_queue_mapping(skb);
329 if (rxq < rcv->real_num_rx_queues) {
330 rq = &rcv_priv->rq[rxq];
332 /* The napi pointer is available when an XDP program is
333 * attached or when GRO is enabled
334 * Don't bother with napi/GRO if the skb can't be aggregated
336 use_napi = rcu_access_pointer(rq->napi) &&
337 veth_skb_is_eligible_for_gro(dev, rcv, skb);
340 skb_tx_timestamp(skb);
341 if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
343 dev_lstats_add(dev, length);
346 atomic64_inc(&priv->dropped);
350 __veth_xdp_flush(rq);
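/* Transmit path in short: look up the peer under RCU, map the skb's tx
 * queue onto a peer rx queue when it is in range, and take the NAPI/GRO
 * path only if that queue has a napi instance and the skb looks worth
 * aggregating.  On success the per-cpu byte/packet counters are updated,
 * otherwise priv->dropped is bumped; the final __veth_xdp_flush()
 * (presumably guarded by use_napi in the elided lines) kicks the peer's
 * NAPI so the queued skbs get processed.
 */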
357 static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
359 struct veth_priv *priv = netdev_priv(dev);
361 dev_lstats_read(dev, packets, bytes);
362 return atomic64_read(&priv->dropped);
365 static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
367 struct veth_priv *priv = netdev_priv(dev);
370 result->peer_tq_xdp_xmit_err = 0;
371 result->xdp_packets = 0;
372 result->xdp_tx_err = 0;
373 result->xdp_bytes = 0;
374 result->rx_drops = 0;
375 for (i = 0; i < dev->num_rx_queues; i++) {
376 u64 packets, bytes, drops, xdp_tx_err, peer_tq_xdp_xmit_err;
377 struct veth_rq_stats *stats = &priv->rq[i].stats;
381 start = u64_stats_fetch_begin_irq(&stats->syncp);
382 peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err;
383 xdp_tx_err = stats->vs.xdp_tx_err;
384 packets = stats->vs.xdp_packets;
385 bytes = stats->vs.xdp_bytes;
386 drops = stats->vs.rx_drops;
387 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
388 result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err;
389 result->xdp_tx_err += xdp_tx_err;
390 result->xdp_packets += packets;
391 result->xdp_bytes += bytes;
392 result->rx_drops += drops;
396 static void veth_get_stats64(struct net_device *dev,
397 struct rtnl_link_stats64 *tot)
399 struct veth_priv *priv = netdev_priv(dev);
400 struct net_device *peer;
401 struct veth_stats rx;
404 tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes);
405 tot->tx_bytes = bytes;
406 tot->tx_packets = packets;
408 veth_stats_rx(&rx, dev);
409 tot->tx_dropped += rx.xdp_tx_err;
410 tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err;
411 tot->rx_bytes = rx.xdp_bytes;
412 tot->rx_packets = rx.xdp_packets;
415 peer = rcu_dereference(priv->peer);
417 veth_stats_tx(peer, &packets, &bytes);
418 tot->rx_bytes += bytes;
419 tot->rx_packets += packets;
421 veth_stats_rx(&rx, peer);
422 tot->tx_dropped += rx.peer_tq_xdp_xmit_err;
423 tot->rx_dropped += rx.xdp_tx_err;
424 tot->tx_bytes += rx.xdp_bytes;
425 tot->tx_packets += rx.xdp_packets;
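/* Note the cross-accounting: traffic this device sends is what the peer
 * receives, so the peer's XDP rx counters are folded into our tx totals
 * and the peer's tx counters into our rx totals; the XDP xmit/tx error
 * counters are attributed the same way.
 */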
430 /* fake multicast ability */
431 static void veth_set_multicast_list(struct net_device *dev)
435 static struct sk_buff *veth_build_skb(void *head, int headroom, int len,
440 skb = build_skb(head, buflen);
444 skb_reserve(skb, headroom);
450 static int veth_select_rxq(struct net_device *dev)
452 return smp_processor_id() % dev->real_num_rx_queues;
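/* XDP transmissions towards the peer pick a receive queue by current CPU,
 * so several CPUs may map onto the same queue; that is why veth_xdp_xmit()
 * below takes the ring's producer_lock instead of relying on NAPI-style
 * single-producer access.
 */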
455 static struct net_device *veth_peer_dev(struct net_device *dev)
457 struct veth_priv *priv = netdev_priv(dev);
459 /* Callers must be under RCU read side. */
460 return rcu_dereference(priv->peer);
463 static int veth_xdp_xmit(struct net_device *dev, int n,
464 struct xdp_frame **frames,
465 u32 flags, bool ndo_xmit)
467 struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
468 int i, ret = -ENXIO, nxmit = 0;
469 struct net_device *rcv;
470 unsigned int max_len;
473 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
477 rcv = rcu_dereference(priv->peer);
481 rcv_priv = netdev_priv(rcv);
482 rq = &rcv_priv->rq[veth_select_rxq(rcv)];
483 /* The napi pointer is set if NAPI is enabled, which ensures that
484 * xdp_ring is initialized on receive side and the peer device is up.
486 if (!rcu_access_pointer(rq->napi))
489 max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;
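/* Frames bigger than what the peer can accept are rejected in the loop
 * below; for a plain Ethernet peer with the default 1500-byte MTU this
 * limit works out to 1500 + 14 (header) + 4 (VLAN tag) = 1518 bytes.
 */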
491 spin_lock(&rq->xdp_ring.producer_lock);
492 for (i = 0; i < n; i++) {
493 struct xdp_frame *frame = frames[i];
494 void *ptr = veth_xdp_to_ptr(frame);
496 if (unlikely(frame->len > max_len ||
497 __ptr_ring_produce(&rq->xdp_ring, ptr)))
501 spin_unlock(&rq->xdp_ring.producer_lock);
503 if (flags & XDP_XMIT_FLUSH)
504 __veth_xdp_flush(rq);
508 u64_stats_update_begin(&rq->stats.syncp);
509 rq->stats.vs.peer_tq_xdp_xmit += nxmit;
510 rq->stats.vs.peer_tq_xdp_xmit_err += n - nxmit;
511 u64_stats_update_end(&rq->stats.syncp);
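/* Successful and failed transmissions are accounted on the *receiving*
 * queue's stats (peer_tq_xdp_xmit / peer_tq_xdp_xmit_err); ethtool later
 * re-exports those peer rq counters as this device's per-tx-queue stats,
 * see veth_get_ethtool_stats() above.
 */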
520 static int veth_ndo_xdp_xmit(struct net_device *dev, int n,
521 struct xdp_frame **frames, u32 flags)
525 err = veth_xdp_xmit(dev, n, frames, flags, true);
527 struct veth_priv *priv = netdev_priv(dev);
529 atomic64_add(n, &priv->dropped);
535 static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
537 int sent, i, err = 0, drops;
539 sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false);
545 for (i = sent; unlikely(i < bq->count); i++)
546 xdp_return_frame(bq->q[i]);
548 drops = bq->count - sent;
549 trace_xdp_bulk_tx(rq->dev, sent, drops, err);
551 u64_stats_update_begin(&rq->stats.syncp);
552 rq->stats.vs.xdp_tx += sent;
553 rq->stats.vs.xdp_tx_err += drops;
554 u64_stats_update_end(&rq->stats.syncp);
559 static void veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
561 struct veth_priv *rcv_priv, *priv = netdev_priv(rq->dev);
562 struct net_device *rcv;
563 struct veth_rq *rcv_rq;
566 veth_xdp_flush_bq(rq, bq);
567 rcv = rcu_dereference(priv->peer);
571 rcv_priv = netdev_priv(rcv);
572 rcv_rq = &rcv_priv->rq[veth_select_rxq(rcv)];
573 /* xdp_ring is initialized on receive side? */
574 if (unlikely(!rcu_access_pointer(rcv_rq->xdp_prog)))
577 __veth_xdp_flush(rcv_rq);
582 static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
583 struct veth_xdp_tx_bq *bq)
585 struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);
587 if (unlikely(!frame))
590 if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE))
591 veth_xdp_flush_bq(rq, bq);
593 bq->q[bq->count++] = frame;
598 static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
599 struct xdp_frame *frame,
600 struct veth_xdp_tx_bq *bq,
601 struct veth_stats *stats)
603 struct xdp_frame orig_frame;
604 struct bpf_prog *xdp_prog;
607 xdp_prog = rcu_dereference(rq->xdp_prog);
608 if (likely(xdp_prog)) {
612 xdp_convert_frame_to_buff(frame, &xdp);
613 xdp.rxq = &rq->xdp_rxq;
615 act = bpf_prog_run_xdp(xdp_prog, &xdp);
619 if (xdp_update_frame_from_buff(&xdp, frame))
624 xdp.rxq->mem = frame->mem;
625 if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
626 trace_xdp_exception(rq->dev, xdp_prog, act);
636 xdp.rxq->mem = frame->mem;
637 if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
642 stats->xdp_redirect++;
646 bpf_warn_invalid_xdp_action(rq->dev, xdp_prog, act);
649 trace_xdp_exception(rq->dev, xdp_prog, act);
661 xdp_return_frame(frame);
666 /* frames array contains at most VETH_XDP_BATCH entries */
667 static void veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames,
668 int n_xdpf, struct veth_xdp_tx_bq *bq,
669 struct veth_stats *stats)
671 void *skbs[VETH_XDP_BATCH];
674 if (xdp_alloc_skb_bulk(skbs, n_xdpf,
675 GFP_ATOMIC | __GFP_ZERO) < 0) {
676 for (i = 0; i < n_xdpf; i++)
677 xdp_return_frame(frames[i]);
678 stats->rx_drops += n_xdpf;
683 for (i = 0; i < n_xdpf; i++) {
684 struct sk_buff *skb = skbs[i];
686 skb = __xdp_build_skb_from_frame(frames[i], skb,
689 xdp_return_frame(frames[i]);
693 napi_gro_receive(&rq->xdp_napi, skb);
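/* Frames the XDP program passed up are converted to skbs in batches: the
 * skb heads are bulk-allocated first, and on allocation failure the whole
 * batch of frames is returned and counted as rx_drops.  Each successfully
 * built skb is then fed to GRO.
 */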
697 static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
699 struct veth_xdp_tx_bq *bq,
700 struct veth_stats *stats)
702 u32 pktlen, headroom, act, metalen, frame_sz;
703 void *orig_data, *orig_data_end;
704 struct bpf_prog *xdp_prog;
705 int mac_len, delta, off;
708 skb_prepare_for_gro(skb);
711 xdp_prog = rcu_dereference(rq->xdp_prog);
712 if (unlikely(!xdp_prog)) {
717 mac_len = skb->data - skb_mac_header(skb);
718 pktlen = skb->len + mac_len;
719 headroom = skb_headroom(skb) - mac_len;
721 if (skb_shared(skb) || skb_head_is_locked(skb) ||
722 skb_is_nonlinear(skb) || headroom < XDP_PACKET_HEADROOM) {
723 struct sk_buff *nskb;
728 size = SKB_DATA_ALIGN(VETH_XDP_HEADROOM + pktlen) +
729 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
730 if (size > PAGE_SIZE)
733 page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
737 head = page_address(page);
738 start = head + VETH_XDP_HEADROOM;
739 if (skb_copy_bits(skb, -mac_len, start, pktlen)) {
740 page_frag_free(head);
744 nskb = veth_build_skb(head, VETH_XDP_HEADROOM + mac_len,
745 skb->len, PAGE_SIZE);
747 page_frag_free(head);
751 skb_copy_header(nskb, skb);
752 head_off = skb_headroom(nskb) - skb_headroom(skb);
753 skb_headers_offset_update(nskb, head_off);
758 /* SKB "head" area always have tailroom for skb_shared_info */
759 frame_sz = skb_end_pointer(skb) - skb->head;
760 frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
761 xdp_init_buff(&xdp, frame_sz, &rq->xdp_rxq);
762 xdp_prepare_buff(&xdp, skb->head, skb->mac_header, pktlen, true);
764 orig_data = xdp.data;
765 orig_data_end = xdp.data_end;
767 act = bpf_prog_run_xdp(xdp_prog, &xdp);
773 get_page(virt_to_page(xdp.data));
775 xdp.rxq->mem = rq->xdp_mem;
776 if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
777 trace_xdp_exception(rq->dev, xdp_prog, act);
785 get_page(virt_to_page(xdp.data));
787 xdp.rxq->mem = rq->xdp_mem;
788 if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
792 stats->xdp_redirect++;
796 bpf_warn_invalid_xdp_action(rq->dev, xdp_prog, act);
799 trace_xdp_exception(rq->dev, xdp_prog, act);
807 /* check if bpf_xdp_adjust_head was used */
808 delta = orig_data - xdp.data;
809 off = mac_len + delta;
811 __skb_push(skb, off);
813 __skb_pull(skb, -off);
814 skb->mac_header -= delta;
816 /* check if bpf_xdp_adjust_tail was used */
817 off = xdp.data_end - orig_data_end;
819 __skb_put(skb, off); /* positive on grow, negative on shrink */
820 skb->protocol = eth_type_trans(skb, rq->dev);
822 metalen = xdp.data - xdp.data_meta;
824 skb_metadata_set(skb, metalen);
835 page_frag_free(xdp.data);
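/* Summary of the skb receive hook above: if the skb is shared, has a
 * locked/cloned head, is non-linear, or lacks XDP headroom, it is first
 * copied into a freshly allocated page with VETH_XDP_HEADROOM in front;
 * the XDP program then runs on the linear buffer, and any
 * bpf_xdp_adjust_head()/bpf_xdp_adjust_tail()/metadata changes are
 * propagated back into the skb before it continues up the stack.
 */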
840 static int veth_xdp_rcv(struct veth_rq *rq, int budget,
841 struct veth_xdp_tx_bq *bq,
842 struct veth_stats *stats)
844 int i, done = 0, n_xdpf = 0;
845 void *xdpf[VETH_XDP_BATCH];
847 for (i = 0; i < budget; i++) {
848 void *ptr = __ptr_ring_consume(&rq->xdp_ring);
853 if (veth_is_xdp_frame(ptr)) {
855 struct xdp_frame *frame = veth_ptr_to_xdp(ptr);
857 stats->xdp_bytes += frame->len;
858 frame = veth_xdp_rcv_one(rq, frame, bq, stats);
861 xdpf[n_xdpf++] = frame;
862 if (n_xdpf == VETH_XDP_BATCH) {
863 veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf,
870 struct sk_buff *skb = ptr;
872 stats->xdp_bytes += skb->len;
873 skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
875 if (skb_shared(skb) || skb_unclone(skb, GFP_ATOMIC))
876 netif_receive_skb(skb);
878 napi_gro_receive(&rq->xdp_napi, skb);
885 veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf, bq, stats);
887 u64_stats_update_begin(&rq->stats.syncp);
888 rq->stats.vs.xdp_redirect += stats->xdp_redirect;
889 rq->stats.vs.xdp_bytes += stats->xdp_bytes;
890 rq->stats.vs.xdp_drops += stats->xdp_drops;
891 rq->stats.vs.rx_drops += stats->rx_drops;
892 rq->stats.vs.xdp_packets += done;
893 u64_stats_update_end(&rq->stats.syncp);
898 static int veth_poll(struct napi_struct *napi, int budget)
901 container_of(napi, struct veth_rq, xdp_napi);
902 struct veth_stats stats = {};
903 struct veth_xdp_tx_bq bq;
908 xdp_set_return_frame_no_direct();
909 done = veth_xdp_rcv(rq, budget, &bq, &stats);
911 if (done < budget && napi_complete_done(napi, done)) {
912 /* Write rx_notify_masked before reading ptr_ring */
913 smp_store_mb(rq->rx_notify_masked, false);
914 if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
915 rq->rx_notify_masked = true;
916 napi_schedule(&rq->xdp_napi);
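/* The smp_store_mb() above pairs with the barrier in __veth_xdp_flush()
 * ("Write ptr_ring before reading rx_notify_masked"): clear the mask with
 * a full barrier, then re-check the ring, so a producer that saw the mask
 * still set while we were completing NAPI cannot leave entries stranded.
 */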
920 if (stats.xdp_tx > 0)
921 veth_xdp_flush(rq, &bq);
922 if (stats.xdp_redirect > 0)
924 xdp_clear_return_frame_no_direct();
929 static int __veth_napi_enable_range(struct net_device *dev, int start, int end)
931 struct veth_priv *priv = netdev_priv(dev);
934 for (i = start; i < end; i++) {
935 struct veth_rq *rq = &priv->rq[i];
937 err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
942 for (i = start; i < end; i++) {
943 struct veth_rq *rq = &priv->rq[i];
945 napi_enable(&rq->xdp_napi);
946 rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
952 for (i--; i >= start; i--)
953 ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);
958 static int __veth_napi_enable(struct net_device *dev)
960 return __veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
963 static void veth_napi_del_range(struct net_device *dev, int start, int end)
965 struct veth_priv *priv = netdev_priv(dev);
968 for (i = start; i < end; i++) {
969 struct veth_rq *rq = &priv->rq[i];
971 rcu_assign_pointer(priv->rq[i].napi, NULL);
972 napi_disable(&rq->xdp_napi);
973 __netif_napi_del(&rq->xdp_napi);
977 for (i = start; i < end; i++) {
978 struct veth_rq *rq = &priv->rq[i];
980 rq->rx_notify_masked = false;
981 ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
985 static void veth_napi_del(struct net_device *dev)
987 veth_napi_del_range(dev, 0, dev->real_num_rx_queues);
990 static bool veth_gro_requested(const struct net_device *dev)
992 return !!(dev->wanted_features & NETIF_F_GRO);
995 static int veth_enable_xdp_range(struct net_device *dev, int start, int end,
996 bool napi_already_on)
998 struct veth_priv *priv = netdev_priv(dev);
1001 for (i = start; i < end; i++) {
1002 struct veth_rq *rq = &priv->rq[i];
1004 if (!napi_already_on)
1005 netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
1006 err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
1010 err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
1011 MEM_TYPE_PAGE_SHARED,
1016 /* Save original mem info as it can be overwritten */
1017 rq->xdp_mem = rq->xdp_rxq.mem;
1022 xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
1024 for (i--; i >= start; i--) {
1025 struct veth_rq *rq = &priv->rq[i];
1027 xdp_rxq_info_unreg(&rq->xdp_rxq);
1028 if (!napi_already_on)
1029 netif_napi_del(&rq->xdp_napi);
1035 static void veth_disable_xdp_range(struct net_device *dev, int start, int end,
1038 struct veth_priv *priv = netdev_priv(dev);
1041 for (i = start; i < end; i++) {
1042 struct veth_rq *rq = &priv->rq[i];
1044 rq->xdp_rxq.mem = rq->xdp_mem;
1045 xdp_rxq_info_unreg(&rq->xdp_rxq);
1048 netif_napi_del(&rq->xdp_napi);
1052 static int veth_enable_xdp(struct net_device *dev)
1054 bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP);
1055 struct veth_priv *priv = netdev_priv(dev);
1058 if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
1059 err = veth_enable_xdp_range(dev, 0, dev->real_num_rx_queues, napi_already_on);
1063 if (!napi_already_on) {
1064 err = __veth_napi_enable(dev);
1066 veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true);
1070 if (!veth_gro_requested(dev)) {
1071 /* user-space did not require GRO, but adding XDP
1072 * is supposed to get GRO working
1074 dev->features |= NETIF_F_GRO;
1075 netdev_features_change(dev);
1080 for (i = 0; i < dev->real_num_rx_queues; i++) {
1081 rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog);
1082 rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
1088 static void veth_disable_xdp(struct net_device *dev)
1090 struct veth_priv *priv = netdev_priv(dev);
1093 for (i = 0; i < dev->real_num_rx_queues; i++)
1094 rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
1096 if (!netif_running(dev) || !veth_gro_requested(dev)) {
1099 /* if user-space did not require GRO, but adding XDP
1100 * enabled it, clear it now
1102 if (!veth_gro_requested(dev) && netif_running(dev)) {
1103 dev->features &= ~NETIF_F_GRO;
1104 netdev_features_change(dev);
1108 veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false);
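/* GRO and XDP are intertwined: veth_enable_xdp() turns GRO on behind the
 * user's back when it was not requested (the NAPI receive path is needed
 * anyway), and veth_disable_xdp() clears it again only if user space never
 * asked for it, leaving an explicitly requested GRO setting alone.
 */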
1111 static int veth_napi_enable_range(struct net_device *dev, int start, int end)
1113 struct veth_priv *priv = netdev_priv(dev);
1116 for (i = start; i < end; i++) {
1117 struct veth_rq *rq = &priv->rq[i];
1119 netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
1122 err = __veth_napi_enable_range(dev, start, end);
1124 for (i = start; i < end; i++) {
1125 struct veth_rq *rq = &priv->rq[i];
1127 netif_napi_del(&rq->xdp_napi);
1134 static int veth_napi_enable(struct net_device *dev)
1136 return veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
1139 static void veth_disable_range_safe(struct net_device *dev, int start, int end)
1141 struct veth_priv *priv = netdev_priv(dev);
1146 if (priv->_xdp_prog) {
1147 veth_napi_del_range(dev, start, end);
1148 veth_disable_xdp_range(dev, start, end, false);
1149 } else if (veth_gro_requested(dev)) {
1150 veth_napi_del_range(dev, start, end);
1154 static int veth_enable_range_safe(struct net_device *dev, int start, int end)
1156 struct veth_priv *priv = netdev_priv(dev);
1162 if (priv->_xdp_prog) {
1163 /* these channels are freshly initialized, napi is not on there even
1164 * when GRO is requested
1166 err = veth_enable_xdp_range(dev, start, end, false);
1170 err = __veth_napi_enable_range(dev, start, end);
1172 /* on error always delete the newly added napis */
1173 veth_disable_xdp_range(dev, start, end, true);
1176 } else if (veth_gro_requested(dev)) {
1177 return veth_napi_enable_range(dev, start, end);
1182 static int veth_set_channels(struct net_device *dev,
1183 struct ethtool_channels *ch)
1185 struct veth_priv *priv = netdev_priv(dev);
1186 unsigned int old_rx_count, new_rx_count;
1187 struct veth_priv *peer_priv;
1188 struct net_device *peer;
1191 /* sanity check. Upper bounds are already enforced by the caller */
1192 if (!ch->rx_count || !ch->tx_count)
1195 /* avoid breaking XDP, if that is enabled */
1196 peer = rtnl_dereference(priv->peer);
1197 peer_priv = peer ? netdev_priv(peer) : NULL;
1198 if (priv->_xdp_prog && peer && ch->rx_count < peer->real_num_tx_queues)
1201 if (peer && peer_priv && peer_priv->_xdp_prog && ch->tx_count > peer->real_num_rx_queues)
1204 old_rx_count = dev->real_num_rx_queues;
1205 new_rx_count = ch->rx_count;
1206 if (netif_running(dev)) {
1207 /* turn device off */
1208 netif_carrier_off(dev);
1210 netif_carrier_off(peer);
1212 /* try to allocate new resources, as needed */
1213 err = veth_enable_range_safe(dev, old_rx_count, new_rx_count);
1218 err = netif_set_real_num_rx_queues(dev, ch->rx_count);
1222 err = netif_set_real_num_tx_queues(dev, ch->tx_count);
1224 int err2 = netif_set_real_num_rx_queues(dev, old_rx_count);
1226 /* this error condition could happen only if rx and tx change
1227 * in opposite directions (e.g. tx nr raises, rx nr decreases)
1228 * and we can't do anything to fully restore the original
1232 pr_warn("Can't restore rx queues config %d -> %d %d",
1233 new_rx_count, old_rx_count, err2);
1239 if (netif_running(dev)) {
1240 /* note that we need to swap the arguments WRT the enable part
1241 * to identify the range we have to disable
1243 veth_disable_range_safe(dev, new_rx_count, old_rx_count);
1244 netif_carrier_on(dev);
1246 netif_carrier_on(peer);
1251 new_rx_count = old_rx_count;
1252 old_rx_count = ch->rx_count;
1256 static int veth_open(struct net_device *dev)
1258 struct veth_priv *priv = netdev_priv(dev);
1259 struct net_device *peer = rtnl_dereference(priv->peer);
1265 if (priv->_xdp_prog) {
1266 err = veth_enable_xdp(dev);
1269 } else if (veth_gro_requested(dev)) {
1270 err = veth_napi_enable(dev);
1275 if (peer->flags & IFF_UP) {
1276 netif_carrier_on(dev);
1277 netif_carrier_on(peer);
1283 static int veth_close(struct net_device *dev)
1285 struct veth_priv *priv = netdev_priv(dev);
1286 struct net_device *peer = rtnl_dereference(priv->peer);
1288 netif_carrier_off(dev);
1290 netif_carrier_off(peer);
1292 if (priv->_xdp_prog)
1293 veth_disable_xdp(dev);
1294 else if (veth_gro_requested(dev))
1300 static int is_valid_veth_mtu(int mtu)
1302 return mtu >= ETH_MIN_MTU && mtu <= ETH_MAX_MTU;
1305 static int veth_alloc_queues(struct net_device *dev)
1307 struct veth_priv *priv = netdev_priv(dev);
1310 priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL);
1314 for (i = 0; i < dev->num_rx_queues; i++) {
1315 priv->rq[i].dev = dev;
1316 u64_stats_init(&priv->rq[i].stats.syncp);
1322 static void veth_free_queues(struct net_device *dev)
1324 struct veth_priv *priv = netdev_priv(dev);
1329 static int veth_dev_init(struct net_device *dev)
1333 dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
1337 err = veth_alloc_queues(dev);
1339 free_percpu(dev->lstats);
1346 static void veth_dev_free(struct net_device *dev)
1348 veth_free_queues(dev);
1349 free_percpu(dev->lstats);
1352 #ifdef CONFIG_NET_POLL_CONTROLLER
1353 static void veth_poll_controller(struct net_device *dev)
1355 /* veth only receives frames when its peer sends one
1356 * Since it has nothing to do with disabling irqs, we are guaranteed
1357 * never to have pending data when we poll for it so
1358 * there is nothing to do here.
1360 * We need this though so netpoll recognizes us as an interface that
1361 * supports polling, which enables bridge devices in virt setups to
1362 * still use netconsole
1365 #endif /* CONFIG_NET_POLL_CONTROLLER */
1367 static int veth_get_iflink(const struct net_device *dev)
1369 struct veth_priv *priv = netdev_priv(dev);
1370 struct net_device *peer;
1374 peer = rcu_dereference(priv->peer);
1375 iflink = peer ? peer->ifindex : 0;
1381 static netdev_features_t veth_fix_features(struct net_device *dev,
1382 netdev_features_t features)
1384 struct veth_priv *priv = netdev_priv(dev);
1385 struct net_device *peer;
1387 peer = rtnl_dereference(priv->peer);
1389 struct veth_priv *peer_priv = netdev_priv(peer);
1391 if (peer_priv->_xdp_prog)
1392 features &= ~NETIF_F_GSO_SOFTWARE;
1394 if (priv->_xdp_prog)
1395 features |= NETIF_F_GRO;
1400 static int veth_set_features(struct net_device *dev,
1401 netdev_features_t features)
1403 netdev_features_t changed = features ^ dev->features;
1404 struct veth_priv *priv = netdev_priv(dev);
1407 if (!(changed & NETIF_F_GRO) || !(dev->flags & IFF_UP) || priv->_xdp_prog)
1410 if (features & NETIF_F_GRO) {
1411 err = veth_napi_enable(dev);
1420 static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
1422 struct veth_priv *peer_priv, *priv = netdev_priv(dev);
1423 struct net_device *peer;
1429 peer = rcu_dereference(priv->peer);
1430 if (unlikely(!peer))
1433 peer_priv = netdev_priv(peer);
1434 priv->requested_headroom = new_hr;
1435 new_hr = max(priv->requested_headroom, peer_priv->requested_headroom);
1436 dev->needed_headroom = new_hr;
1437 peer->needed_headroom = new_hr;
1443 static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1444 struct netlink_ext_ack *extack)
1446 struct veth_priv *priv = netdev_priv(dev);
1447 struct bpf_prog *old_prog;
1448 struct net_device *peer;
1449 unsigned int max_mtu;
1452 old_prog = priv->_xdp_prog;
1453 priv->_xdp_prog = prog;
1454 peer = rtnl_dereference(priv->peer);
1458 NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached");
1463 max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
1464 peer->hard_header_len -
1465 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
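/* Rough example of the resulting limit, assuming a common x86_64
 * configuration (4096-byte pages, 256-byte XDP_PACKET_HEADROOM,
 * NET_IP_ALIGN == 0, 14-byte Ethernet header, skb_shared_info rounded up
 * to 320 bytes):
 *
 *	4096 - 256 - 14 - 320 = 3506 bytes of peer MTU at most
 */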
1466 if (peer->mtu > max_mtu) {
1467 NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
1472 if (dev->real_num_rx_queues < peer->real_num_tx_queues) {
1473 NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues");
1478 if (dev->flags & IFF_UP) {
1479 err = veth_enable_xdp(dev);
1481 NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed");
1487 peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
1488 peer->max_mtu = max_mtu;
1494 if (dev->flags & IFF_UP)
1495 veth_disable_xdp(dev);
1498 peer->hw_features |= NETIF_F_GSO_SOFTWARE;
1499 peer->max_mtu = ETH_MAX_MTU;
1502 bpf_prog_put(old_prog);
1505 if ((!!old_prog ^ !!prog) && peer)
1506 netdev_update_features(peer);
1510 priv->_xdp_prog = old_prog;
1515 static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1517 switch (xdp->command) {
1518 case XDP_SETUP_PROG:
1519 return veth_xdp_set(dev, xdp->prog, xdp->extack);
1525 static const struct net_device_ops veth_netdev_ops = {
1526 .ndo_init = veth_dev_init,
1527 .ndo_open = veth_open,
1528 .ndo_stop = veth_close,
1529 .ndo_start_xmit = veth_xmit,
1530 .ndo_get_stats64 = veth_get_stats64,
1531 .ndo_set_rx_mode = veth_set_multicast_list,
1532 .ndo_set_mac_address = eth_mac_addr,
1533 #ifdef CONFIG_NET_POLL_CONTROLLER
1534 .ndo_poll_controller = veth_poll_controller,
1536 .ndo_get_iflink = veth_get_iflink,
1537 .ndo_fix_features = veth_fix_features,
1538 .ndo_set_features = veth_set_features,
1539 .ndo_features_check = passthru_features_check,
1540 .ndo_set_rx_headroom = veth_set_rx_headroom,
1541 .ndo_bpf = veth_xdp,
1542 .ndo_xdp_xmit = veth_ndo_xdp_xmit,
1543 .ndo_get_peer_dev = veth_peer_dev,
1546 #define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
1547 NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \
1548 NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
1549 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
1550 NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX )
1552 static void veth_setup(struct net_device *dev)
1556 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1557 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1558 dev->priv_flags |= IFF_NO_QUEUE;
1559 dev->priv_flags |= IFF_PHONY_HEADROOM;
1561 dev->netdev_ops = &veth_netdev_ops;
1562 dev->ethtool_ops = &veth_ethtool_ops;
1563 dev->features |= NETIF_F_LLTX;
1564 dev->features |= VETH_FEATURES;
1565 dev->vlan_features = dev->features &
1566 ~(NETIF_F_HW_VLAN_CTAG_TX |
1567 NETIF_F_HW_VLAN_STAG_TX |
1568 NETIF_F_HW_VLAN_CTAG_RX |
1569 NETIF_F_HW_VLAN_STAG_RX);
1570 dev->needs_free_netdev = true;
1571 dev->priv_destructor = veth_dev_free;
1572 dev->max_mtu = ETH_MAX_MTU;
1574 dev->hw_features = VETH_FEATURES;
1575 dev->hw_enc_features = VETH_FEATURES;
1576 dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
1583 static int veth_validate(struct nlattr *tb[], struct nlattr *data[],
1584 struct netlink_ext_ack *extack)
1586 if (tb[IFLA_ADDRESS]) {
1587 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1589 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1590 return -EADDRNOTAVAIL;
1593 if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
1599 static struct rtnl_link_ops veth_link_ops;
1601 static void veth_disable_gro(struct net_device *dev)
1603 dev->features &= ~NETIF_F_GRO;
1604 dev->wanted_features &= ~NETIF_F_GRO;
1605 netdev_update_features(dev);
1608 static int veth_init_queues(struct net_device *dev, struct nlattr *tb[])
1612 if (!tb[IFLA_NUM_TX_QUEUES] && dev->num_tx_queues > 1) {
1613 err = netif_set_real_num_tx_queues(dev, 1);
1617 if (!tb[IFLA_NUM_RX_QUEUES] && dev->num_rx_queues > 1) {
1618 err = netif_set_real_num_rx_queues(dev, 1);
1625 static int veth_newlink(struct net *src_net, struct net_device *dev,
1626 struct nlattr *tb[], struct nlattr *data[],
1627 struct netlink_ext_ack *extack)
1630 struct net_device *peer;
1631 struct veth_priv *priv;
1632 char ifname[IFNAMSIZ];
1633 struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
1634 unsigned char name_assign_type;
1635 struct ifinfomsg *ifmp;
1639 * create and register peer first
1641 if (data != NULL && data[VETH_INFO_PEER] != NULL) {
1642 struct nlattr *nla_peer;
1644 nla_peer = data[VETH_INFO_PEER];
1645 ifmp = nla_data(nla_peer);
1646 err = rtnl_nla_parse_ifla(peer_tb,
1647 nla_data(nla_peer) + sizeof(struct ifinfomsg),
1648 nla_len(nla_peer) - sizeof(struct ifinfomsg),
1653 err = veth_validate(peer_tb, NULL, extack);
1663 if (ifmp && tbp[IFLA_IFNAME]) {
1664 nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
1665 name_assign_type = NET_NAME_USER;
1667 snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
1668 name_assign_type = NET_NAME_ENUM;
1671 net = rtnl_link_get_net(src_net, tbp);
1673 return PTR_ERR(net);
1675 peer = rtnl_create_link(net, ifname, name_assign_type,
1676 &veth_link_ops, tbp, extack);
1679 return PTR_ERR(peer);
1682 if (!ifmp || !tbp[IFLA_ADDRESS])
1683 eth_hw_addr_random(peer);
1685 if (ifmp && (dev->ifindex != 0))
1686 peer->ifindex = ifmp->ifi_index;
1688 netif_set_gso_max_size(peer, dev->gso_max_size);
1689 netif_set_gso_max_segs(peer, dev->gso_max_segs);
1691 err = register_netdevice(peer);
1695 goto err_register_peer;
1697 /* keep GRO disabled by default to be consistent with the established
1698 * veth behavior
1699 */
1700 veth_disable_gro(peer);
1701 netif_carrier_off(peer);
1703 err = rtnl_configure_link(peer, ifmp);
1705 goto err_configure_peer;
1710 * note that, since we've registered the new device, the dev's name
1711 * should be re-allocated
1714 if (tb[IFLA_ADDRESS] == NULL)
1715 eth_hw_addr_random(dev);
1717 if (tb[IFLA_IFNAME])
1718 nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
1720 snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
1722 err = register_netdevice(dev);
1724 goto err_register_dev;
1726 netif_carrier_off(dev);
1729 * tie the devices together
1732 priv = netdev_priv(dev);
1733 rcu_assign_pointer(priv->peer, peer);
1734 err = veth_init_queues(dev, tb);
1738 priv = netdev_priv(peer);
1739 rcu_assign_pointer(priv->peer, dev);
1740 err = veth_init_queues(peer, tb);
1744 veth_disable_gro(dev);
1748 unregister_netdevice(dev);
1752 unregister_netdevice(peer);
1760 static void veth_dellink(struct net_device *dev, struct list_head *head)
1762 struct veth_priv *priv;
1763 struct net_device *peer;
1765 priv = netdev_priv(dev);
1766 peer = rtnl_dereference(priv->peer);
1768 /* Note : dellink() is called from default_device_exit_batch(),
1769 * before a rcu_synchronize() point. The devices are guaranteed
1770 * not being freed before one RCU grace period.
1772 RCU_INIT_POINTER(priv->peer, NULL);
1773 unregister_netdevice_queue(dev, head);
1776 priv = netdev_priv(peer);
1777 RCU_INIT_POINTER(priv->peer, NULL);
1778 unregister_netdevice_queue(peer, head);
1782 static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
1783 [VETH_INFO_PEER] = { .len = sizeof(struct ifinfomsg) },
1786 static struct net *veth_get_link_net(const struct net_device *dev)
1788 struct veth_priv *priv = netdev_priv(dev);
1789 struct net_device *peer = rtnl_dereference(priv->peer);
1791 return peer ? dev_net(peer) : dev_net(dev);
1794 static unsigned int veth_get_num_queues(void)
1796 /* enforce the same queue limit as rtnl_create_link */
1797 int queues = num_possible_cpus();
1804 static struct rtnl_link_ops veth_link_ops = {
1806 .priv_size = sizeof(struct veth_priv),
1807 .setup = veth_setup,
1808 .validate = veth_validate,
1809 .newlink = veth_newlink,
1810 .dellink = veth_dellink,
1811 .policy = veth_policy,
1812 .maxtype = VETH_INFO_MAX,
1813 .get_link_net = veth_get_link_net,
1814 .get_num_tx_queues = veth_get_num_queues,
1815 .get_num_rx_queues = veth_get_num_queues,
1822 static __init int veth_init(void)
1824 return rtnl_link_register(&veth_link_ops);
1827 static __exit void veth_exit(void)
1829 rtnl_link_unregister(&veth_link_ops);
1832 module_init(veth_init);
1833 module_exit(veth_exit);
1835 MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
1836 MODULE_LICENSE("GPL v2");
1837 MODULE_ALIAS_RTNL_LINK(DRV_NAME);