1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
7 * Author: Pavel Emelianov <xemul@openvz.org>
8 * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
12 #include <linux/netdevice.h>
13 #include <linux/slab.h>
14 #include <linux/ethtool.h>
15 #include <linux/etherdevice.h>
16 #include <linux/u64_stats_sync.h>
18 #include <net/rtnetlink.h>
22 #include <linux/veth.h>
23 #include <linux/module.h>
24 #include <linux/bpf.h>
25 #include <linux/filter.h>
26 #include <linux/ptr_ring.h>
27 #include <linux/bpf_trace.h>
28 #include <linux/net_tstamp.h>
30 #define DRV_NAME "veth"
31 #define DRV_VERSION "1.0"
33 #define VETH_XDP_FLAG BIT(0)
34 #define VETH_RING_SIZE 256
35 #define VETH_XDP_HEADROOM (XDP_PACKET_HEADROOM + NET_IP_ALIGN)
37 #define VETH_XDP_TX_BULK_SIZE 16
38 #define VETH_XDP_BATCH 16
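/* VETH_XDP_FLAG is OR-ed into pointers queued on a ptr_ring so the consumer
 * can tell xdp_frames from sk_buffs (see veth_xdp_to_ptr() and
 * veth_is_xdp_frame() below).  VETH_XDP_HEADROOM reserves room in front of
 * each packet so an attached XDP program can grow headers, and
 * VETH_XDP_TX_BULK_SIZE / VETH_XDP_BATCH bound the XDP_TX and skb-build
 * batches handled per pass.
 */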
50 u64 peer_tq_xdp_xmit_err;
53 struct veth_rq_stats {
55 struct u64_stats_sync syncp;
59 struct napi_struct xdp_napi;
60 struct net_device *dev;
61 struct bpf_prog __rcu *xdp_prog;
62 struct xdp_mem_info xdp_mem;
63 struct veth_rq_stats stats;
64 bool rx_notify_masked;
65 struct ptr_ring xdp_ring;
66 struct xdp_rxq_info xdp_rxq;
70 struct net_device __rcu *peer;
72 struct bpf_prog *_xdp_prog;
74 unsigned int requested_headroom;
77 struct veth_xdp_tx_bq {
78 struct xdp_frame *q[VETH_XDP_TX_BULK_SIZE];
86 struct veth_q_stat_desc {
87 char desc[ETH_GSTRING_LEN];
91 #define VETH_RQ_STAT(m) offsetof(struct veth_stats, m)
93 static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
94 { "xdp_packets", VETH_RQ_STAT(xdp_packets) },
95 { "xdp_bytes", VETH_RQ_STAT(xdp_bytes) },
96 { "drops", VETH_RQ_STAT(rx_drops) },
97 { "xdp_redirect", VETH_RQ_STAT(xdp_redirect) },
98 { "xdp_drops", VETH_RQ_STAT(xdp_drops) },
99 { "xdp_tx", VETH_RQ_STAT(xdp_tx) },
100 { "xdp_tx_errors", VETH_RQ_STAT(xdp_tx_err) },
103 #define VETH_RQ_STATS_LEN ARRAY_SIZE(veth_rq_stats_desc)
105 static const struct veth_q_stat_desc veth_tq_stats_desc[] = {
106 { "xdp_xmit", VETH_RQ_STAT(peer_tq_xdp_xmit) },
107 { "xdp_xmit_errors", VETH_RQ_STAT(peer_tq_xdp_xmit_err) },
110 #define VETH_TQ_STATS_LEN ARRAY_SIZE(veth_tq_stats_desc)
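/* Layout of "ethtool -S" output: the fixed ethtool_stats_keys entries first,
 * then VETH_RQ_STATS_LEN counters per real rx queue, then VETH_TQ_STATS_LEN
 * counters per tx queue.  The tx-queue counters are accumulated from the
 * peer's per-rx-queue xmit statistics in veth_get_ethtool_stats().
 */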
113 const char string[ETH_GSTRING_LEN];
114 } ethtool_stats_keys[] = {
118 static int veth_get_link_ksettings(struct net_device *dev,
119 struct ethtool_link_ksettings *cmd)
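/* There is no physical link behind a veth pair, so report a fixed
 * 10Gb/s full-duplex setting with autonegotiation off.
 */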
121 cmd->base.speed = SPEED_10000;
122 cmd->base.duplex = DUPLEX_FULL;
123 cmd->base.port = PORT_TP;
124 cmd->base.autoneg = AUTONEG_DISABLE;
128 static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
130 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
131 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
134 static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
136 char *p = (char *)buf;
141 memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
142 p += sizeof(ethtool_stats_keys);
143 for (i = 0; i < dev->real_num_rx_queues; i++) {
144 for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
145 snprintf(p, ETH_GSTRING_LEN,
147 i, veth_rq_stats_desc[j].desc);
148 p += ETH_GSTRING_LEN;
151 for (i = 0; i < dev->real_num_tx_queues; i++) {
152 for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
153 snprintf(p, ETH_GSTRING_LEN,
155 i, veth_tq_stats_desc[j].desc);
156 p += ETH_GSTRING_LEN;
163 static int veth_get_sset_count(struct net_device *dev, int sset)
167 return ARRAY_SIZE(ethtool_stats_keys) +
168 VETH_RQ_STATS_LEN * dev->real_num_rx_queues +
169 VETH_TQ_STATS_LEN * dev->real_num_tx_queues;
175 static void veth_get_ethtool_stats(struct net_device *dev,
176 struct ethtool_stats *stats, u64 *data)
178 struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
179 struct net_device *peer = rtnl_dereference(priv->peer);
182 data[0] = peer ? peer->ifindex : 0;
184 for (i = 0; i < dev->real_num_rx_queues; i++) {
185 const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
186 const void *stats_base = (void *)&rq_stats->vs;
191 start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
192 for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
193 offset = veth_rq_stats_desc[j].offset;
194 data[idx + j] = *(u64 *)(stats_base + offset);
196 } while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
197 idx += VETH_RQ_STATS_LEN;
203 rcv_priv = netdev_priv(peer);
204 for (i = 0; i < peer->real_num_rx_queues; i++) {
205 const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats;
206 const void *base = (void *)&rq_stats->vs;
207 unsigned int start, tx_idx = idx;
210 tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
212 start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
213 for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
214 offset = veth_tq_stats_desc[j].offset;
215 data[tx_idx + j] += *(u64 *)(base + offset);
217 } while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
221 static const struct ethtool_ops veth_ethtool_ops = {
222 .get_drvinfo = veth_get_drvinfo,
223 .get_link = ethtool_op_get_link,
224 .get_strings = veth_get_strings,
225 .get_sset_count = veth_get_sset_count,
226 .get_ethtool_stats = veth_get_ethtool_stats,
227 .get_link_ksettings = veth_get_link_ksettings,
228 .get_ts_info = ethtool_op_get_ts_info,
231 /* general routines */
233 static bool veth_is_xdp_frame(void *ptr)
235 return (unsigned long)ptr & VETH_XDP_FLAG;
238 static struct xdp_frame *veth_ptr_to_xdp(void *ptr)
240 return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
243 static void *veth_xdp_to_ptr(struct xdp_frame *xdp)
245 return (void *)((unsigned long)xdp | VETH_XDP_FLAG);
248 static void veth_ptr_free(void *ptr)
250 if (veth_is_xdp_frame(ptr))
251 xdp_return_frame(veth_ptr_to_xdp(ptr));
256 static void __veth_xdp_flush(struct veth_rq *rq)
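/* Kick the NAPI poller for @rq unless it is already scheduled.
 * rx_notify_masked suppresses redundant napi_schedule() calls while the
 * poller is running; veth_poll() clears it again once the ring is drained.
 */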
258 /* Write ptr_ring before reading rx_notify_masked */
260 if (!rq->rx_notify_masked) {
261 rq->rx_notify_masked = true;
262 napi_schedule(&rq->xdp_napi);
266 static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
268 if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
269 dev_kfree_skb_any(skb);
273 return NET_RX_SUCCESS;
276 static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
277 struct veth_rq *rq, bool xdp)
279 return __dev_forward_skb(dev, skb) ?: xdp ?
280 veth_xdp_rx(rq, skb) :
284 static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
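/* ndo_start_xmit: a packet "sent" on one side of the pair is delivered
 * straight into the peer's receive path.  If the selected peer rx queue has
 * an XDP program attached, the skb is queued on that queue's ptr_ring and
 * processed by the peer's NAPI poller; otherwise it goes through the normal
 * receive path.
 */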
286 struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
287 struct veth_rq *rq = NULL;
288 struct net_device *rcv;
289 int length = skb->len;
290 bool rcv_xdp = false;
294 rcv = rcu_dereference(priv->peer);
295 if (unlikely(!rcv)) {
300 rcv_priv = netdev_priv(rcv);
301 rxq = skb_get_queue_mapping(skb);
302 if (rxq < rcv->real_num_rx_queues) {
303 rq = &rcv_priv->rq[rxq];
304 rcv_xdp = rcu_access_pointer(rq->xdp_prog);
305 skb_record_rx_queue(skb, rxq);
308 skb_tx_timestamp(skb);
309 if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) {
311 dev_lstats_add(dev, length);
314 atomic64_inc(&priv->dropped);
318 __veth_xdp_flush(rq);
325 static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
327 struct veth_priv *priv = netdev_priv(dev);
329 dev_lstats_read(dev, packets, bytes);
330 return atomic64_read(&priv->dropped);
333 static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
335 struct veth_priv *priv = netdev_priv(dev);
338 result->peer_tq_xdp_xmit_err = 0;
339 result->xdp_packets = 0;
340 result->xdp_tx_err = 0;
341 result->xdp_bytes = 0;
342 result->rx_drops = 0;
343 for (i = 0; i < dev->num_rx_queues; i++) {
344 u64 packets, bytes, drops, xdp_tx_err, peer_tq_xdp_xmit_err;
345 struct veth_rq_stats *stats = &priv->rq[i].stats;
349 start = u64_stats_fetch_begin_irq(&stats->syncp);
350 peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err;
351 xdp_tx_err = stats->vs.xdp_tx_err;
352 packets = stats->vs.xdp_packets;
353 bytes = stats->vs.xdp_bytes;
354 drops = stats->vs.rx_drops;
355 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
356 result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err;
357 result->xdp_tx_err += xdp_tx_err;
358 result->xdp_packets += packets;
359 result->xdp_bytes += bytes;
360 result->rx_drops += drops;
364 static void veth_get_stats64(struct net_device *dev,
365 struct rtnl_link_stats64 *tot)
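/* Fill rtnl_link_stats64 for the device: tx counters come from the per-cpu
 * lstats updated in veth_xmit(), rx counters from the per-queue XDP stats.
 * The peer's counters are then folded in so that traffic is accounted as rx
 * on one side and tx on the other.
 */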
367 struct veth_priv *priv = netdev_priv(dev);
368 struct net_device *peer;
369 struct veth_stats rx;
372 tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes);
373 tot->tx_bytes = bytes;
374 tot->tx_packets = packets;
376 veth_stats_rx(&rx, dev);
377 tot->tx_dropped += rx.xdp_tx_err;
378 tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err;
379 tot->rx_bytes = rx.xdp_bytes;
380 tot->rx_packets = rx.xdp_packets;
383 peer = rcu_dereference(priv->peer);
385 veth_stats_tx(peer, &packets, &bytes);
386 tot->rx_bytes += bytes;
387 tot->rx_packets += packets;
389 veth_stats_rx(&rx, peer);
390 tot->tx_dropped += rx.peer_tq_xdp_xmit_err;
391 tot->rx_dropped += rx.xdp_tx_err;
392 tot->tx_bytes += rx.xdp_bytes;
393 tot->tx_packets += rx.xdp_packets;
398 /* fake multicast ability */
399 static void veth_set_multicast_list(struct net_device *dev)
403 static struct sk_buff *veth_build_skb(void *head, int headroom, int len,
408 skb = build_skb(head, buflen);
412 skb_reserve(skb, headroom);
418 static int veth_select_rxq(struct net_device *dev)
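/* Spread XDP transmissions across the peer's rx queues based on the
 * current CPU.
 */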
420 return smp_processor_id() % dev->real_num_rx_queues;
423 static struct net_device *veth_peer_dev(struct net_device *dev)
425 struct veth_priv *priv = netdev_priv(dev);
427 /* Callers must hold the RCU read lock. */
428 return rcu_dereference(priv->peer);
431 static int veth_xdp_xmit(struct net_device *dev, int n,
432 struct xdp_frame **frames,
433 u32 flags, bool ndo_xmit)
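/* Common backend for ndo_xdp_xmit and XDP_TX: push a batch of xdp_frames
 * onto one of the peer's rx rings.  Frames that exceed the peer's MTU
 * budget or that do not fit in the ring are dropped and counted as errors.
 */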
435 struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
436 int i, ret = -ENXIO, drops = 0;
437 struct net_device *rcv;
438 unsigned int max_len;
441 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
445 rcv = rcu_dereference(priv->peer);
449 rcv_priv = netdev_priv(rcv);
450 rq = &rcv_priv->rq[veth_select_rxq(rcv)];
451 /* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive
452 * side. This means an XDP program is loaded on the peer and the peer
455 if (!rcu_access_pointer(rq->xdp_prog))
458 max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;
460 spin_lock(&rq->xdp_ring.producer_lock);
461 for (i = 0; i < n; i++) {
462 struct xdp_frame *frame = frames[i];
463 void *ptr = veth_xdp_to_ptr(frame);
465 if (unlikely(frame->len > max_len ||
466 __ptr_ring_produce(&rq->xdp_ring, ptr))) {
467 xdp_return_frame_rx_napi(frame);
471 spin_unlock(&rq->xdp_ring.producer_lock);
473 if (flags & XDP_XMIT_FLUSH)
474 __veth_xdp_flush(rq);
478 u64_stats_update_begin(&rq->stats.syncp);
479 rq->stats.vs.peer_tq_xdp_xmit += n - drops;
480 rq->stats.vs.peer_tq_xdp_xmit_err += drops;
481 u64_stats_update_end(&rq->stats.syncp);
490 static int veth_ndo_xdp_xmit(struct net_device *dev, int n,
491 struct xdp_frame **frames, u32 flags)
495 err = veth_xdp_xmit(dev, n, frames, flags, true);
497 struct veth_priv *priv = netdev_priv(dev);
499 atomic64_add(n, &priv->dropped);
505 static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
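/* Flush the XDP_TX bulk queue: hand the accumulated frames to the peer via
 * veth_xdp_xmit() and free whatever could not be sent.
 */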
507 int sent, i, err = 0;
509 sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false);
513 for (i = 0; i < bq->count; i++)
514 xdp_return_frame(bq->q[i]);
516 trace_xdp_bulk_tx(rq->dev, sent, bq->count - sent, err);
518 u64_stats_update_begin(&rq->stats.syncp);
519 rq->stats.vs.xdp_tx += sent;
520 rq->stats.vs.xdp_tx_err += bq->count - sent;
521 u64_stats_update_end(&rq->stats.syncp);
526 static void veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
528 struct veth_priv *rcv_priv, *priv = netdev_priv(rq->dev);
529 struct net_device *rcv;
530 struct veth_rq *rcv_rq;
533 veth_xdp_flush_bq(rq, bq);
534 rcv = rcu_dereference(priv->peer);
538 rcv_priv = netdev_priv(rcv);
539 rcv_rq = &rcv_priv->rq[veth_select_rxq(rcv)];
540 /* Only kick the peer if its xdp_ring is initialized, i.e. an XDP program is attached on the receive side. */
541 if (unlikely(!rcu_access_pointer(rcv_rq->xdp_prog)))
544 __veth_xdp_flush(rcv_rq);
549 static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
550 struct veth_xdp_tx_bq *bq)
552 struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);
554 if (unlikely(!frame))
557 if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE))
558 veth_xdp_flush_bq(rq, bq);
560 bq->q[bq->count++] = frame;
565 static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
566 struct xdp_frame *frame,
567 struct veth_xdp_tx_bq *bq,
568 struct veth_stats *stats)
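/* Run the queue's XDP program on an xdp_frame taken off the ring.  Returns
 * the (possibly updated) frame on XDP_PASS so the caller can build an skb
 * from it, or NULL when the frame was consumed (TX, REDIRECT) or dropped.
 */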
570 struct xdp_frame orig_frame;
571 struct bpf_prog *xdp_prog;
574 xdp_prog = rcu_dereference(rq->xdp_prog);
575 if (likely(xdp_prog)) {
579 xdp_convert_frame_to_buff(frame, &xdp);
580 xdp.rxq = &rq->xdp_rxq;
582 act = bpf_prog_run_xdp(xdp_prog, &xdp);
586 if (xdp_update_frame_from_buff(&xdp, frame))
591 xdp.rxq->mem = frame->mem;
592 if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
593 trace_xdp_exception(rq->dev, xdp_prog, act);
603 xdp.rxq->mem = frame->mem;
604 if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
609 stats->xdp_redirect++;
613 bpf_warn_invalid_xdp_action(act);
616 trace_xdp_exception(rq->dev, xdp_prog, act);
628 xdp_return_frame(frame);
633 /* The frames array contains at most VETH_XDP_BATCH entries. */
634 static void veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames,
635 int n_xdpf, struct veth_xdp_tx_bq *bq,
636 struct veth_stats *stats)
638 void *skbs[VETH_XDP_BATCH];
641 if (xdp_alloc_skb_bulk(skbs, n_xdpf,
642 GFP_ATOMIC | __GFP_ZERO) < 0) {
643 for (i = 0; i < n_xdpf; i++)
644 xdp_return_frame(frames[i]);
645 stats->rx_drops += n_xdpf;
650 for (i = 0; i < n_xdpf; i++) {
651 struct sk_buff *skb = skbs[i];
653 skb = __xdp_build_skb_from_frame(frames[i], skb,
656 xdp_return_frame(frames[i]);
660 napi_gro_receive(&rq->xdp_napi, skb);
664 static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
666 struct veth_xdp_tx_bq *bq,
667 struct veth_stats *stats)
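/* Run the queue's XDP program on an skb received from the peer.  Shared or
 * non-linear skbs, skbs whose head cannot be written, and skbs without
 * enough headroom are first copied into a freshly allocated page so the
 * program can safely adjust the packet head and tail.
 */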
669 u32 pktlen, headroom, act, metalen, frame_sz;
670 void *orig_data, *orig_data_end;
671 struct bpf_prog *xdp_prog;
672 int mac_len, delta, off;
678 xdp_prog = rcu_dereference(rq->xdp_prog);
679 if (unlikely(!xdp_prog)) {
684 mac_len = skb->data - skb_mac_header(skb);
685 pktlen = skb->len + mac_len;
686 headroom = skb_headroom(skb) - mac_len;
688 if (skb_shared(skb) || skb_head_is_locked(skb) ||
689 skb_is_nonlinear(skb) || headroom < XDP_PACKET_HEADROOM) {
690 struct sk_buff *nskb;
695 size = SKB_DATA_ALIGN(VETH_XDP_HEADROOM + pktlen) +
696 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
697 if (size > PAGE_SIZE)
700 page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
704 head = page_address(page);
705 start = head + VETH_XDP_HEADROOM;
706 if (skb_copy_bits(skb, -mac_len, start, pktlen)) {
707 page_frag_free(head);
711 nskb = veth_build_skb(head, VETH_XDP_HEADROOM + mac_len,
712 skb->len, PAGE_SIZE);
714 page_frag_free(head);
718 skb_copy_header(nskb, skb);
719 head_off = skb_headroom(nskb) - skb_headroom(skb);
720 skb_headers_offset_update(nskb, head_off);
725 /* The SKB "head" area always has tailroom for skb_shared_info. */
726 frame_sz = skb_end_pointer(skb) - skb->head;
727 frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
728 xdp_init_buff(&xdp, frame_sz, &rq->xdp_rxq);
729 xdp_prepare_buff(&xdp, skb->head, skb->mac_header, pktlen, true);
731 orig_data = xdp.data;
732 orig_data_end = xdp.data_end;
734 act = bpf_prog_run_xdp(xdp_prog, &xdp);
740 get_page(virt_to_page(xdp.data));
742 xdp.rxq->mem = rq->xdp_mem;
743 if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
744 trace_xdp_exception(rq->dev, xdp_prog, act);
752 get_page(virt_to_page(xdp.data));
754 xdp.rxq->mem = rq->xdp_mem;
755 if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
759 stats->xdp_redirect++;
763 bpf_warn_invalid_xdp_action(act);
766 trace_xdp_exception(rq->dev, xdp_prog, act);
774 /* check if bpf_xdp_adjust_head was used */
775 delta = orig_data - xdp.data;
776 off = mac_len + delta;
778 __skb_push(skb, off);
780 __skb_pull(skb, -off);
781 skb->mac_header -= delta;
783 /* check if bpf_xdp_adjust_tail was used */
784 off = xdp.data_end - orig_data_end;
786 __skb_put(skb, off); /* positive on grow, negative on shrink */
787 skb->protocol = eth_type_trans(skb, rq->dev);
789 metalen = xdp.data - xdp.data_meta;
791 skb_metadata_set(skb, metalen);
802 page_frag_free(xdp.data);
807 static int veth_xdp_rcv(struct veth_rq *rq, int budget,
808 struct veth_xdp_tx_bq *bq,
809 struct veth_stats *stats)
811 int i, done = 0, n_xdpf = 0;
812 void *xdpf[VETH_XDP_BATCH];
814 for (i = 0; i < budget; i++) {
815 void *ptr = __ptr_ring_consume(&rq->xdp_ring);
820 if (veth_is_xdp_frame(ptr)) {
822 struct xdp_frame *frame = veth_ptr_to_xdp(ptr);
824 stats->xdp_bytes += frame->len;
825 frame = veth_xdp_rcv_one(rq, frame, bq, stats);
828 xdpf[n_xdpf++] = frame;
829 if (n_xdpf == VETH_XDP_BATCH) {
830 veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf,
837 struct sk_buff *skb = ptr;
839 stats->xdp_bytes += skb->len;
840 skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
842 napi_gro_receive(&rq->xdp_napi, skb);
848 veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf, bq, stats);
850 u64_stats_update_begin(&rq->stats.syncp);
851 rq->stats.vs.xdp_redirect += stats->xdp_redirect;
852 rq->stats.vs.xdp_bytes += stats->xdp_bytes;
853 rq->stats.vs.xdp_drops += stats->xdp_drops;
854 rq->stats.vs.rx_drops += stats->rx_drops;
855 rq->stats.vs.xdp_packets += done;
856 u64_stats_update_end(&rq->stats.syncp);
861 static int veth_poll(struct napi_struct *napi, int budget)
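/* NAPI poll handler: consume up to @budget entries from the queue's
 * ptr_ring (xdp_frames and sk_buffs alike), then flush the XDP_TX bulk
 * queue and complete any pending redirects.  When the ring is drained,
 * producer notifications are re-enabled via rx_notify_masked.
 */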
864 container_of(napi, struct veth_rq, xdp_napi);
865 struct veth_stats stats = {};
866 struct veth_xdp_tx_bq bq;
871 xdp_set_return_frame_no_direct();
872 done = veth_xdp_rcv(rq, budget, &bq, &stats);
874 if (done < budget && napi_complete_done(napi, done)) {
875 /* Write rx_notify_masked before reading ptr_ring */
876 smp_store_mb(rq->rx_notify_masked, false);
877 if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
878 rq->rx_notify_masked = true;
879 napi_schedule(&rq->xdp_napi);
883 if (stats.xdp_tx > 0)
884 veth_xdp_flush(rq, &bq);
885 if (stats.xdp_redirect > 0)
887 xdp_clear_return_frame_no_direct();
892 static int veth_napi_add(struct net_device *dev)
894 struct veth_priv *priv = netdev_priv(dev);
897 for (i = 0; i < dev->real_num_rx_queues; i++) {
898 struct veth_rq *rq = &priv->rq[i];
900 err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
905 for (i = 0; i < dev->real_num_rx_queues; i++) {
906 struct veth_rq *rq = &priv->rq[i];
908 napi_enable(&rq->xdp_napi);
913 for (i--; i >= 0; i--)
914 ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);
919 static void veth_napi_del(struct net_device *dev)
921 struct veth_priv *priv = netdev_priv(dev);
924 for (i = 0; i < dev->real_num_rx_queues; i++) {
925 struct veth_rq *rq = &priv->rq[i];
927 napi_disable(&rq->xdp_napi);
928 __netif_napi_del(&rq->xdp_napi);
932 for (i = 0; i < dev->real_num_rx_queues; i++) {
933 struct veth_rq *rq = &priv->rq[i];
935 rq->rx_notify_masked = false;
936 ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
940 static int veth_enable_xdp(struct net_device *dev)
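/* Register an xdp_rxq_info and NAPI context for every real rx queue and
 * point each queue's xdp_prog at the program stored in priv->_xdp_prog.
 * Called when the device is brought up with a program set, or when a
 * program is attached while the device is up.
 */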
942 struct veth_priv *priv = netdev_priv(dev);
945 if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
946 for (i = 0; i < dev->real_num_rx_queues; i++) {
947 struct veth_rq *rq = &priv->rq[i];
949 netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
950 err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
954 err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
955 MEM_TYPE_PAGE_SHARED,
960 /* Save original mem info as it can be overwritten */
961 rq->xdp_mem = rq->xdp_rxq.mem;
964 err = veth_napi_add(dev);
969 for (i = 0; i < dev->real_num_rx_queues; i++)
970 rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog);
974 xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
976 for (i--; i >= 0; i--) {
977 struct veth_rq *rq = &priv->rq[i];
979 xdp_rxq_info_unreg(&rq->xdp_rxq);
980 netif_napi_del(&rq->xdp_napi);
986 static void veth_disable_xdp(struct net_device *dev)
988 struct veth_priv *priv = netdev_priv(dev);
991 for (i = 0; i < dev->real_num_rx_queues; i++)
992 rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
994 for (i = 0; i < dev->real_num_rx_queues; i++) {
995 struct veth_rq *rq = &priv->rq[i];
997 rq->xdp_rxq.mem = rq->xdp_mem;
998 xdp_rxq_info_unreg(&rq->xdp_rxq);
1002 static int veth_open(struct net_device *dev)
1004 struct veth_priv *priv = netdev_priv(dev);
1005 struct net_device *peer = rtnl_dereference(priv->peer);
1011 if (priv->_xdp_prog) {
1012 err = veth_enable_xdp(dev);
1017 if (peer->flags & IFF_UP) {
1018 netif_carrier_on(dev);
1019 netif_carrier_on(peer);
1025 static int veth_close(struct net_device *dev)
1027 struct veth_priv *priv = netdev_priv(dev);
1028 struct net_device *peer = rtnl_dereference(priv->peer);
1030 netif_carrier_off(dev);
1032 netif_carrier_off(peer);
1034 if (priv->_xdp_prog)
1035 veth_disable_xdp(dev);
1040 static int is_valid_veth_mtu(int mtu)
1042 return mtu >= ETH_MIN_MTU && mtu <= ETH_MAX_MTU;
1045 static int veth_alloc_queues(struct net_device *dev)
1047 struct veth_priv *priv = netdev_priv(dev);
1050 priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL);
1054 for (i = 0; i < dev->num_rx_queues; i++) {
1055 priv->rq[i].dev = dev;
1056 u64_stats_init(&priv->rq[i].stats.syncp);
1062 static void veth_free_queues(struct net_device *dev)
1064 struct veth_priv *priv = netdev_priv(dev);
1069 static int veth_dev_init(struct net_device *dev)
1073 dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
1077 err = veth_alloc_queues(dev);
1079 free_percpu(dev->lstats);
1086 static void veth_dev_free(struct net_device *dev)
1088 veth_free_queues(dev);
1089 free_percpu(dev->lstats);
1092 #ifdef CONFIG_NET_POLL_CONTROLLER
1093 static void veth_poll_controller(struct net_device *dev)
1095 /* veth only receives frames when its peer sends one.
1096 * Since that has nothing to do with disabling irqs, we are guaranteed
1097 * never to have pending data when we poll for it, so
1098 * there is nothing to do here.
1100 * We still need this callback so that netpoll recognizes us as an
1101 * interface that supports polling, which enables bridge devices in
1102 * virt setups to keep using netconsole.
1105 #endif /* CONFIG_NET_POLL_CONTROLLER */
1107 static int veth_get_iflink(const struct net_device *dev)
1109 struct veth_priv *priv = netdev_priv(dev);
1110 struct net_device *peer;
1114 peer = rcu_dereference(priv->peer);
1115 iflink = peer ? peer->ifindex : 0;
1121 static netdev_features_t veth_fix_features(struct net_device *dev,
1122 netdev_features_t features)
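/* While the peer has an XDP program attached, drop software GSO from our
 * feature set so the peer's XDP path never has to deal with segmented
 * skbs.
 */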
1124 struct veth_priv *priv = netdev_priv(dev);
1125 struct net_device *peer;
1127 peer = rtnl_dereference(priv->peer);
1129 struct veth_priv *peer_priv = netdev_priv(peer);
1131 if (peer_priv->_xdp_prog)
1132 features &= ~NETIF_F_GSO_SOFTWARE;
1138 static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
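/* ndo_set_rx_headroom: remember the headroom requested for this device and
 * propagate the maximum of both ends' requests to needed_headroom on both
 * devices.
 */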
1140 struct veth_priv *peer_priv, *priv = netdev_priv(dev);
1141 struct net_device *peer;
1147 peer = rcu_dereference(priv->peer);
1148 if (unlikely(!peer))
1151 peer_priv = netdev_priv(peer);
1152 priv->requested_headroom = new_hr;
1153 new_hr = max(priv->requested_headroom, peer_priv->requested_headroom);
1154 dev->needed_headroom = new_hr;
1155 peer->needed_headroom = new_hr;
1161 static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1162 struct netlink_ext_ack *extack)
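/* Attach or detach the device's XDP program.  The peer's MTU must leave
 * room for XDP headroom plus skb_shared_info within one page, and the
 * device needs at least as many rx queues as the peer has tx queues.
 * While a program is attached, software GSO towards this device is turned
 * off on the peer and the peer's max_mtu is clamped accordingly.
 */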
1164 struct veth_priv *priv = netdev_priv(dev);
1165 struct bpf_prog *old_prog;
1166 struct net_device *peer;
1167 unsigned int max_mtu;
1170 old_prog = priv->_xdp_prog;
1171 priv->_xdp_prog = prog;
1172 peer = rtnl_dereference(priv->peer);
1176 NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached");
1181 max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
1182 peer->hard_header_len -
1183 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1184 if (peer->mtu > max_mtu) {
1185 NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
1190 if (dev->real_num_rx_queues < peer->real_num_tx_queues) {
1191 NL_SET_ERR_MSG_MOD(extack, "XDP requires at least as many rx queues as the peer has tx queues");
1196 if (dev->flags & IFF_UP) {
1197 err = veth_enable_xdp(dev);
1199 NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed");
1205 peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
1206 peer->max_mtu = max_mtu;
1212 if (dev->flags & IFF_UP)
1213 veth_disable_xdp(dev);
1216 peer->hw_features |= NETIF_F_GSO_SOFTWARE;
1217 peer->max_mtu = ETH_MAX_MTU;
1220 bpf_prog_put(old_prog);
1223 if ((!!old_prog ^ !!prog) && peer)
1224 netdev_update_features(peer);
1228 priv->_xdp_prog = old_prog;
1233 static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1235 switch (xdp->command) {
1236 case XDP_SETUP_PROG:
1237 return veth_xdp_set(dev, xdp->prog, xdp->extack);
1243 static const struct net_device_ops veth_netdev_ops = {
1244 .ndo_init = veth_dev_init,
1245 .ndo_open = veth_open,
1246 .ndo_stop = veth_close,
1247 .ndo_start_xmit = veth_xmit,
1248 .ndo_get_stats64 = veth_get_stats64,
1249 .ndo_set_rx_mode = veth_set_multicast_list,
1250 .ndo_set_mac_address = eth_mac_addr,
1251 #ifdef CONFIG_NET_POLL_CONTROLLER
1252 .ndo_poll_controller = veth_poll_controller,
1254 .ndo_get_iflink = veth_get_iflink,
1255 .ndo_fix_features = veth_fix_features,
1256 .ndo_features_check = passthru_features_check,
1257 .ndo_set_rx_headroom = veth_set_rx_headroom,
1258 .ndo_bpf = veth_xdp,
1259 .ndo_xdp_xmit = veth_ndo_xdp_xmit,
1260 .ndo_get_peer_dev = veth_peer_dev,
1263 #define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
1264 NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \
1265 NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
1266 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
1267 NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX )
1269 static void veth_setup(struct net_device *dev)
1273 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1274 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1275 dev->priv_flags |= IFF_NO_QUEUE;
1276 dev->priv_flags |= IFF_PHONY_HEADROOM;
1278 dev->netdev_ops = &veth_netdev_ops;
1279 dev->ethtool_ops = &veth_ethtool_ops;
1280 dev->features |= NETIF_F_LLTX;
1281 dev->features |= VETH_FEATURES;
1282 dev->vlan_features = dev->features &
1283 ~(NETIF_F_HW_VLAN_CTAG_TX |
1284 NETIF_F_HW_VLAN_STAG_TX |
1285 NETIF_F_HW_VLAN_CTAG_RX |
1286 NETIF_F_HW_VLAN_STAG_RX);
1287 dev->needs_free_netdev = true;
1288 dev->priv_destructor = veth_dev_free;
1289 dev->max_mtu = ETH_MAX_MTU;
1291 dev->hw_features = VETH_FEATURES;
1292 dev->hw_enc_features = VETH_FEATURES;
1293 dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
1300 static int veth_validate(struct nlattr *tb[], struct nlattr *data[],
1301 struct netlink_ext_ack *extack)
1303 if (tb[IFLA_ADDRESS]) {
1304 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1306 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1307 return -EADDRNOTAVAIL;
1310 if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
1316 static struct rtnl_link_ops veth_link_ops;
1318 static int veth_newlink(struct net *src_net, struct net_device *dev,
1319 struct nlattr *tb[], struct nlattr *data[],
1320 struct netlink_ext_ack *extack)
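/* rtnl newlink handler: create both ends of the pair.  The peer device is
 * created and registered first, then the requesting device, and finally
 * the two priv->peer pointers are tied together.  From userspace this is
 * e.g. "ip link add veth0 type veth peer name veth1".
 */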
1323 struct net_device *peer;
1324 struct veth_priv *priv;
1325 char ifname[IFNAMSIZ];
1326 struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
1327 unsigned char name_assign_type;
1328 struct ifinfomsg *ifmp;
1332 * create and register peer first
1334 if (data != NULL && data[VETH_INFO_PEER] != NULL) {
1335 struct nlattr *nla_peer;
1337 nla_peer = data[VETH_INFO_PEER];
1338 ifmp = nla_data(nla_peer);
1339 err = rtnl_nla_parse_ifla(peer_tb,
1340 nla_data(nla_peer) + sizeof(struct ifinfomsg),
1341 nla_len(nla_peer) - sizeof(struct ifinfomsg),
1346 err = veth_validate(peer_tb, NULL, extack);
1356 if (ifmp && tbp[IFLA_IFNAME]) {
1357 nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
1358 name_assign_type = NET_NAME_USER;
1360 snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
1361 name_assign_type = NET_NAME_ENUM;
1364 net = rtnl_link_get_net(src_net, tbp);
1366 return PTR_ERR(net);
1368 peer = rtnl_create_link(net, ifname, name_assign_type,
1369 &veth_link_ops, tbp, extack);
1372 return PTR_ERR(peer);
1375 if (!ifmp || !tbp[IFLA_ADDRESS])
1376 eth_hw_addr_random(peer);
1378 if (ifmp && (dev->ifindex != 0))
1379 peer->ifindex = ifmp->ifi_index;
1381 peer->gso_max_size = dev->gso_max_size;
1382 peer->gso_max_segs = dev->gso_max_segs;
1384 err = register_netdevice(peer);
1388 goto err_register_peer;
1390 netif_carrier_off(peer);
1392 err = rtnl_configure_link(peer, ifmp);
1394 goto err_configure_peer;
1399 * Note that, since we've registered a new device, the dev's name
1400 * should be re-allocated.
1403 if (tb[IFLA_ADDRESS] == NULL)
1404 eth_hw_addr_random(dev);
1406 if (tb[IFLA_IFNAME])
1407 nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
1409 snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
1411 err = register_netdevice(dev);
1413 goto err_register_dev;
1415 netif_carrier_off(dev);
1418 * tie the devices together
1421 priv = netdev_priv(dev);
1422 rcu_assign_pointer(priv->peer, peer);
1424 priv = netdev_priv(peer);
1425 rcu_assign_pointer(priv->peer, dev);
1432 unregister_netdevice(peer);
1440 static void veth_dellink(struct net_device *dev, struct list_head *head)
1442 struct veth_priv *priv;
1443 struct net_device *peer;
1445 priv = netdev_priv(dev);
1446 peer = rtnl_dereference(priv->peer);
1448 /* Note: dellink() is called from default_device_exit_batch(),
1449 * before an RCU synchronization point. The devices are guaranteed
1450 * not to be freed before one RCU grace period.
1452 RCU_INIT_POINTER(priv->peer, NULL);
1453 unregister_netdevice_queue(dev, head);
1456 priv = netdev_priv(peer);
1457 RCU_INIT_POINTER(priv->peer, NULL);
1458 unregister_netdevice_queue(peer, head);
1462 static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
1463 [VETH_INFO_PEER] = { .len = sizeof(struct ifinfomsg) },
1466 static struct net *veth_get_link_net(const struct net_device *dev)
1468 struct veth_priv *priv = netdev_priv(dev);
1469 struct net_device *peer = rtnl_dereference(priv->peer);
1471 return peer ? dev_net(peer) : dev_net(dev);
1474 static struct rtnl_link_ops veth_link_ops = {
1476 .priv_size = sizeof(struct veth_priv),
1477 .setup = veth_setup,
1478 .validate = veth_validate,
1479 .newlink = veth_newlink,
1480 .dellink = veth_dellink,
1481 .policy = veth_policy,
1482 .maxtype = VETH_INFO_MAX,
1483 .get_link_net = veth_get_link_net,
1490 static __init int veth_init(void)
1492 return rtnl_link_register(&veth_link_ops);
1495 static __exit void veth_exit(void)
1497 rtnl_link_unregister(&veth_link_ops);
1500 module_init(veth_init);
1501 module_exit(veth_exit);
1503 MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
1504 MODULE_LICENSE("GPL v2");
1505 MODULE_ALIAS_RTNL_LINK(DRV_NAME);