// SPDX-License-Identifier: GPL-2.0-only
/*
 *  drivers/net/veth.c
 *
 *  Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
 *
 */

#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>

#include <net/rtnetlink.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/xdp.h>
#include <linux/veth.h>
#include <linux/module.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <linux/bpf_trace.h>
#include <linux/net_tstamp.h>

#define DRV_NAME        "veth"
#define DRV_VERSION     "1.0"

#define VETH_XDP_FLAG           BIT(0)
#define VETH_RING_SIZE          256
#define VETH_XDP_HEADROOM       (XDP_PACKET_HEADROOM + NET_IP_ALIGN)

#define VETH_XDP_TX_BULK_SIZE   16

struct veth_stats {
        u64     rx_drops;
        /* xdp */
        u64     xdp_packets;
        u64     xdp_bytes;
        u64     xdp_redirect;
        u64     xdp_drops;
        u64     xdp_tx;
        u64     xdp_tx_err;
        u64     peer_tq_xdp_xmit;
        u64     peer_tq_xdp_xmit_err;
};

struct veth_rq_stats {
        struct veth_stats       vs;
        struct u64_stats_sync   syncp;
};
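
/*
 * A minimal sketch of the u64_stats_sync pattern used throughout this
 * file: writers bracket counter updates with
 *
 *      u64_stats_update_begin(&rq->stats.syncp);
 *      rq->stats.vs.xdp_packets++;
 *      u64_stats_update_end(&rq->stats.syncp);
 *
 * while readers retry until they see a consistent snapshot:
 *
 *      do {
 *              start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
 *              packets = rq->stats.vs.xdp_packets;
 *      } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
 *
 * On 64-bit kernels the seqcount compiles away; on 32-bit it keeps the
 * two halves of each u64 counter coherent.
 */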

struct veth_rq {
        struct napi_struct      xdp_napi;
        struct net_device       *dev;
        struct bpf_prog __rcu   *xdp_prog;
        struct xdp_mem_info     xdp_mem;
        struct veth_rq_stats    stats;
        bool                    rx_notify_masked;
        struct ptr_ring         xdp_ring;
        struct xdp_rxq_info     xdp_rxq;
};

struct veth_priv {
        struct net_device __rcu *peer;
        atomic64_t              dropped;
        struct bpf_prog         *_xdp_prog;
        struct veth_rq          *rq;
        unsigned int            requested_headroom;
};

struct veth_xdp_tx_bq {
        struct xdp_frame *q[VETH_XDP_TX_BULK_SIZE];
        unsigned int count;
};

/*
 * ethtool interface
 */

struct veth_q_stat_desc {
        char    desc[ETH_GSTRING_LEN];
        size_t  offset;
};

#define VETH_RQ_STAT(m) offsetof(struct veth_stats, m)

static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
        { "xdp_packets",        VETH_RQ_STAT(xdp_packets) },
        { "xdp_bytes",          VETH_RQ_STAT(xdp_bytes) },
        { "drops",              VETH_RQ_STAT(rx_drops) },
        { "xdp_redirect",       VETH_RQ_STAT(xdp_redirect) },
        { "xdp_drops",          VETH_RQ_STAT(xdp_drops) },
        { "xdp_tx",             VETH_RQ_STAT(xdp_tx) },
        { "xdp_tx_errors",      VETH_RQ_STAT(xdp_tx_err) },
};

#define VETH_RQ_STATS_LEN       ARRAY_SIZE(veth_rq_stats_desc)

static const struct veth_q_stat_desc veth_tq_stats_desc[] = {
        { "xdp_xmit",           VETH_RQ_STAT(peer_tq_xdp_xmit) },
        { "xdp_xmit_errors",    VETH_RQ_STAT(peer_tq_xdp_xmit_err) },
};

#define VETH_TQ_STATS_LEN       ARRAY_SIZE(veth_tq_stats_desc)

static struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "peer_ifindex" },
};

static int veth_get_link_ksettings(struct net_device *dev,
                                   struct ethtool_link_ksettings *cmd)
{
        cmd->base.speed         = SPEED_10000;
        cmd->base.duplex        = DUPLEX_FULL;
        cmd->base.port          = PORT_TP;
        cmd->base.autoneg       = AUTONEG_DISABLE;
        return 0;
}

static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
        char *p = (char *)buf;
        int i, j;

        switch (stringset) {
        case ETH_SS_STATS:
                memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
                p += sizeof(ethtool_stats_keys);
                for (i = 0; i < dev->real_num_rx_queues; i++) {
                        for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
                                snprintf(p, ETH_GSTRING_LEN,
                                         "rx_queue_%u_%.18s",
                                         i, veth_rq_stats_desc[j].desc);
                                p += ETH_GSTRING_LEN;
                        }
                }
                for (i = 0; i < dev->real_num_tx_queues; i++) {
                        for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
                                snprintf(p, ETH_GSTRING_LEN,
                                         "tx_queue_%u_%.18s",
                                         i, veth_tq_stats_desc[j].desc);
                                p += ETH_GSTRING_LEN;
                        }
                }
                break;
        }
}

static int veth_get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(ethtool_stats_keys) +
                       VETH_RQ_STATS_LEN * dev->real_num_rx_queues +
                       VETH_TQ_STATS_LEN * dev->real_num_tx_queues;
        default:
                return -EOPNOTSUPP;
        }
}

static void veth_get_ethtool_stats(struct net_device *dev,
                struct ethtool_stats *stats, u64 *data)
{
        struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
        struct net_device *peer = rtnl_dereference(priv->peer);
        int i, j, idx;

        data[0] = peer ? peer->ifindex : 0;
        idx = 1;
        for (i = 0; i < dev->real_num_rx_queues; i++) {
                const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
                const void *stats_base = (void *)&rq_stats->vs;
                unsigned int start;
                size_t offset;

                do {
                        start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
                        for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
                                offset = veth_rq_stats_desc[j].offset;
                                data[idx + j] = *(u64 *)(stats_base + offset);
                        }
                } while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
                idx += VETH_RQ_STATS_LEN;
        }

        if (!peer)
                return;

        rcv_priv = netdev_priv(peer);
        for (i = 0; i < peer->real_num_rx_queues; i++) {
                const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats;
                const void *base = (void *)&rq_stats->vs;
                unsigned int start, tx_idx = idx;
                size_t offset;

                tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
                do {
                        start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
                        for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
                                offset = veth_tq_stats_desc[j].offset;
                                data[tx_idx + j] += *(u64 *)(base + offset);
                        }
                } while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
        }
}
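
/*
 * The resulting buffer is flat: data[0] holds the peer ifindex,
 * followed by VETH_RQ_STATS_LEN counters per real rx queue, then
 * VETH_TQ_STATS_LEN counters per real tx queue.  The tx counters are
 * accumulated ("+=") rather than assigned because several peer rx
 * queues fold onto one local tx queue whenever the peer has more rx
 * queues than this device has tx queues.
 */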

static const struct ethtool_ops veth_ethtool_ops = {
        .get_drvinfo            = veth_get_drvinfo,
        .get_link               = ethtool_op_get_link,
        .get_strings            = veth_get_strings,
        .get_sset_count         = veth_get_sset_count,
        .get_ethtool_stats      = veth_get_ethtool_stats,
        .get_link_ksettings     = veth_get_link_ksettings,
        .get_ts_info            = ethtool_op_get_ts_info,
};

/* general routines */

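/*
 * The xdp_ring carries two kinds of pointers: plain sk_buffs queued by
 * veth_xmit() and xdp_frames queued by veth_xdp_xmit().  Both are at
 * least word aligned, so bit 0 (VETH_XDP_FLAG) is free and is used to
 * tag the xdp_frames.  For illustration (the addresses are made up):
 *
 *      xdp_frame 0xffff888012345640 -> stored as 0xffff888012345641
 *      sk_buff   0xffff888012345680 -> stored as-is, bit 0 clear
 */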
static bool veth_is_xdp_frame(void *ptr)
{
        return (unsigned long)ptr & VETH_XDP_FLAG;
}

static struct xdp_frame *veth_ptr_to_xdp(void *ptr)
{
        return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
}

static void *veth_xdp_to_ptr(struct xdp_frame *xdp)
{
        return (void *)((unsigned long)xdp | VETH_XDP_FLAG);
}

static void veth_ptr_free(void *ptr)
{
        if (veth_is_xdp_frame(ptr))
                xdp_return_frame(veth_ptr_to_xdp(ptr));
        else
                kfree_skb(ptr);
}

static void __veth_xdp_flush(struct veth_rq *rq)
{
        /* Write ptr_ring before reading rx_notify_masked */
        smp_mb();
        if (!rq->rx_notify_masked) {
                rq->rx_notify_masked = true;
                napi_schedule(&rq->xdp_napi);
        }
}
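
/*
 * The smp_mb() above pairs with the smp_store_mb() in veth_poll():
 * either the producer sees rx_notify_masked already cleared and
 * schedules NAPI itself, or the consumer sees the new ring entry on
 * its re-check, so a wakeup cannot be lost between the two sides.
 */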

static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
{
        if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
                dev_kfree_skb_any(skb);
                return NET_RX_DROP;
        }

        return NET_RX_SUCCESS;
}

static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
                            struct veth_rq *rq, bool xdp)
{
        return __dev_forward_skb(dev, skb) ?: xdp ?
                veth_xdp_rx(rq, skb) :
                netif_rx(skb);
}
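
/*
 * The "x ?: y" above is the GNU C conditional with omitted middle
 * operand: a non-zero (error) result from __dev_forward_skb() is
 * returned as-is; on success the skb goes either to the peer's XDP
 * ring or to plain netif_rx().
 */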

static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
        struct veth_rq *rq = NULL;
        struct net_device *rcv;
        int length = skb->len;
        bool rcv_xdp = false;
        int rxq;

        rcu_read_lock();
        rcv = rcu_dereference(priv->peer);
        if (unlikely(!rcv)) {
                kfree_skb(skb);
                goto drop;
        }

        rcv_priv = netdev_priv(rcv);
        rxq = skb_get_queue_mapping(skb);
        if (rxq < rcv->real_num_rx_queues) {
                rq = &rcv_priv->rq[rxq];
                rcv_xdp = rcu_access_pointer(rq->xdp_prog);
                if (rcv_xdp)
                        skb_record_rx_queue(skb, rxq);
        }

        skb_tx_timestamp(skb);
        if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) {
                if (!rcv_xdp)
                        dev_lstats_add(dev, length);
        } else {
drop:
                atomic64_inc(&priv->dropped);
        }

        if (rcv_xdp)
                __veth_xdp_flush(rq);

        rcu_read_unlock();

        return NETDEV_TX_OK;
}

static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
{
        struct veth_priv *priv = netdev_priv(dev);

        dev_lstats_read(dev, packets, bytes);
        return atomic64_read(&priv->dropped);
}

static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        int i;

        result->peer_tq_xdp_xmit_err = 0;
        result->xdp_packets = 0;
        result->xdp_tx_err = 0;
        result->xdp_bytes = 0;
        result->rx_drops = 0;
        for (i = 0; i < dev->num_rx_queues; i++) {
                u64 packets, bytes, drops, xdp_tx_err, peer_tq_xdp_xmit_err;
                struct veth_rq_stats *stats = &priv->rq[i].stats;
                unsigned int start;

                do {
                        start = u64_stats_fetch_begin_irq(&stats->syncp);
                        peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err;
                        xdp_tx_err = stats->vs.xdp_tx_err;
                        packets = stats->vs.xdp_packets;
                        bytes = stats->vs.xdp_bytes;
                        drops = stats->vs.rx_drops;
                } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
                result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err;
                result->xdp_tx_err += xdp_tx_err;
                result->xdp_packets += packets;
                result->xdp_bytes += bytes;
                result->rx_drops += drops;
        }
}
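
/*
 * Note that veth_stats_rx() only sums counters from the XDP path.
 * Packets that bypass XDP are accounted via dev_lstats_add() on the
 * sender, and veth_get_stats64() below folds the peer's tx counters
 * back in to produce this device's rx numbers.
 */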

static void veth_get_stats64(struct net_device *dev,
                             struct rtnl_link_stats64 *tot)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct net_device *peer;
        struct veth_stats rx;
        u64 packets, bytes;

        tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes);
        tot->tx_bytes = bytes;
        tot->tx_packets = packets;

        veth_stats_rx(&rx, dev);
        tot->tx_dropped += rx.xdp_tx_err;
        tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err;
        tot->rx_bytes = rx.xdp_bytes;
        tot->rx_packets = rx.xdp_packets;

        rcu_read_lock();
        peer = rcu_dereference(priv->peer);
        if (peer) {
                veth_stats_tx(peer, &packets, &bytes);
                tot->rx_bytes += bytes;
                tot->rx_packets += packets;

                veth_stats_rx(&rx, peer);
                tot->tx_dropped += rx.peer_tq_xdp_xmit_err;
                tot->rx_dropped += rx.xdp_tx_err;
                tot->tx_bytes += rx.xdp_bytes;
                tot->tx_packets += rx.xdp_packets;
        }
        rcu_read_unlock();
}

/* fake multicast ability */
static void veth_set_multicast_list(struct net_device *dev)
{
}

static struct sk_buff *veth_build_skb(void *head, int headroom, int len,
                                      int buflen)
{
        struct sk_buff *skb;

        skb = build_skb(head, buflen);
        if (!skb)
                return NULL;

        skb_reserve(skb, headroom);
        skb_put(skb, len);

        return skb;
}

static int veth_select_rxq(struct net_device *dev)
{
        return smp_processor_id() % dev->real_num_rx_queues;
}
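
/*
 * Queue selection is deliberately stateless: callers run in
 * softirq/NAPI context, so mapping the current CPU onto the peer's
 * real rx queues spreads load without maintaining any shared
 * selection state.
 */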

static struct net_device *veth_peer_dev(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);

        /* Callers must be under RCU read side. */
        return rcu_dereference(priv->peer);
}

static int veth_xdp_xmit(struct net_device *dev, int n,
                         struct xdp_frame **frames,
                         u32 flags, bool ndo_xmit)
{
        struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
        int i, ret = -ENXIO, drops = 0;
        struct net_device *rcv;
        unsigned int max_len;
        struct veth_rq *rq;

        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
                return -EINVAL;

        rcu_read_lock();
        rcv = rcu_dereference(priv->peer);
        if (unlikely(!rcv))
                goto out;

        rcv_priv = netdev_priv(rcv);
        rq = &rcv_priv->rq[veth_select_rxq(rcv)];
        /* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive
         * side. This means an XDP program is loaded on the peer and the peer
         * device is up.
         */
        if (!rcu_access_pointer(rq->xdp_prog))
                goto out;

        max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;

        spin_lock(&rq->xdp_ring.producer_lock);
        for (i = 0; i < n; i++) {
                struct xdp_frame *frame = frames[i];
                void *ptr = veth_xdp_to_ptr(frame);

                if (unlikely(frame->len > max_len ||
                             __ptr_ring_produce(&rq->xdp_ring, ptr))) {
                        xdp_return_frame_rx_napi(frame);
                        drops++;
                }
        }
        spin_unlock(&rq->xdp_ring.producer_lock);

        if (flags & XDP_XMIT_FLUSH)
                __veth_xdp_flush(rq);

        ret = n - drops;
        if (ndo_xmit) {
                u64_stats_update_begin(&rq->stats.syncp);
                rq->stats.vs.peer_tq_xdp_xmit += n - drops;
                rq->stats.vs.peer_tq_xdp_xmit_err += drops;
                u64_stats_update_end(&rq->stats.syncp);
        }

out:
        rcu_read_unlock();

        return ret;
}

static int veth_ndo_xdp_xmit(struct net_device *dev, int n,
                             struct xdp_frame **frames, u32 flags)
{
        int err;

        err = veth_xdp_xmit(dev, n, frames, flags, true);
        if (err < 0) {
                struct veth_priv *priv = netdev_priv(dev);

                atomic64_add(n, &priv->dropped);
        }

        return err;
}

static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
        int sent, i, err = 0;

        sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false);
        if (sent < 0) {
                err = sent;
                sent = 0;
                for (i = 0; i < bq->count; i++)
                        xdp_return_frame(bq->q[i]);
        }
        trace_xdp_bulk_tx(rq->dev, sent, bq->count - sent, err);

        u64_stats_update_begin(&rq->stats.syncp);
        rq->stats.vs.xdp_tx += sent;
        rq->stats.vs.xdp_tx_err += bq->count - sent;
        u64_stats_update_end(&rq->stats.syncp);

        bq->count = 0;
}

static void veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
        struct veth_priv *rcv_priv, *priv = netdev_priv(rq->dev);
        struct net_device *rcv;
        struct veth_rq *rcv_rq;

        rcu_read_lock();
        veth_xdp_flush_bq(rq, bq);
        rcv = rcu_dereference(priv->peer);
        if (unlikely(!rcv))
                goto out;

        rcv_priv = netdev_priv(rcv);
        rcv_rq = &rcv_priv->rq[veth_select_rxq(rcv)];
        /* The xdp_ring is only initialized on the receive side when an XDP
         * program is attached there.
         */
        if (unlikely(!rcu_access_pointer(rcv_rq->xdp_prog)))
                goto out;

        __veth_xdp_flush(rcv_rq);
out:
        rcu_read_unlock();
}

static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
                       struct veth_xdp_tx_bq *bq)
{
        struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);

        if (unlikely(!frame))
                return -EOVERFLOW;

        if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE))
                veth_xdp_flush_bq(rq, bq);

        bq->q[bq->count++] = frame;

        return 0;
}
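
/*
 * XDP_TX frames are batched in the per-poll veth_xdp_tx_bq and only
 * pushed to the peer once VETH_XDP_TX_BULK_SIZE frames accumulate or
 * the poll loop ends (see veth_xdp_flush()), amortizing the producer
 * lock taken in veth_xdp_xmit() across the whole batch.
 */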

static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
                                        struct xdp_frame *frame,
                                        struct veth_xdp_tx_bq *bq,
                                        struct veth_stats *stats)
{
        void *hard_start = frame->data - frame->headroom;
        int len = frame->len, delta = 0;
        struct xdp_frame orig_frame;
        struct bpf_prog *xdp_prog;
        unsigned int headroom;
        struct sk_buff *skb;

        /* bpf_xdp_adjust_head() assures BPF cannot access xdp_frame area */
        hard_start -= sizeof(struct xdp_frame);

        rcu_read_lock();
        xdp_prog = rcu_dereference(rq->xdp_prog);
        if (likely(xdp_prog)) {
                struct xdp_buff xdp;
                u32 act;

                xdp_convert_frame_to_buff(frame, &xdp);
                xdp.rxq = &rq->xdp_rxq;

                act = bpf_prog_run_xdp(xdp_prog, &xdp);

                switch (act) {
                case XDP_PASS:
                        delta = frame->data - xdp.data;
                        len = xdp.data_end - xdp.data;
                        break;
                case XDP_TX:
                        orig_frame = *frame;
                        xdp.rxq->mem = frame->mem;
                        if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
                                trace_xdp_exception(rq->dev, xdp_prog, act);
                                frame = &orig_frame;
                                stats->rx_drops++;
                                goto err_xdp;
                        }
                        stats->xdp_tx++;
                        rcu_read_unlock();
                        goto xdp_xmit;
                case XDP_REDIRECT:
                        orig_frame = *frame;
                        xdp.rxq->mem = frame->mem;
                        if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
                                frame = &orig_frame;
                                stats->rx_drops++;
                                goto err_xdp;
                        }
                        stats->xdp_redirect++;
                        rcu_read_unlock();
                        goto xdp_xmit;
                default:
                        bpf_warn_invalid_xdp_action(act);
                        fallthrough;
                case XDP_ABORTED:
                        trace_xdp_exception(rq->dev, xdp_prog, act);
                        fallthrough;
                case XDP_DROP:
                        stats->xdp_drops++;
                        goto err_xdp;
                }
        }
        rcu_read_unlock();

        headroom = sizeof(struct xdp_frame) + frame->headroom - delta;
        skb = veth_build_skb(hard_start, headroom, len, frame->frame_sz);
        if (!skb) {
                xdp_return_frame(frame);
                stats->rx_drops++;
                goto err;
        }

        xdp_release_frame(frame);
        xdp_scrub_frame(frame);
        skb->protocol = eth_type_trans(skb, rq->dev);
err:
        return skb;
err_xdp:
        rcu_read_unlock();
        xdp_return_frame(frame);
xdp_xmit:
        return NULL;
}

static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
                                        struct sk_buff *skb,
                                        struct veth_xdp_tx_bq *bq,
                                        struct veth_stats *stats)
{
        u32 pktlen, headroom, act, metalen;
        void *orig_data, *orig_data_end;
        struct bpf_prog *xdp_prog;
        int mac_len, delta, off;
        struct xdp_buff xdp;

        skb_orphan(skb);

        rcu_read_lock();
        xdp_prog = rcu_dereference(rq->xdp_prog);
        if (unlikely(!xdp_prog)) {
                rcu_read_unlock();
                goto out;
        }

        mac_len = skb->data - skb_mac_header(skb);
        pktlen = skb->len + mac_len;
        headroom = skb_headroom(skb) - mac_len;

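        /* XDP wants a private, linear buffer with XDP_PACKET_HEADROOM in
         * front of the frame.  If this skb is shared, cloned, non-linear
         * or short on headroom, copy it into a freshly allocated page
         * first.
         */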
        if (skb_shared(skb) || skb_head_is_locked(skb) ||
            skb_is_nonlinear(skb) || headroom < XDP_PACKET_HEADROOM) {
                struct sk_buff *nskb;
                int size, head_off;
                void *head, *start;
                struct page *page;

                size = SKB_DATA_ALIGN(VETH_XDP_HEADROOM + pktlen) +
                       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                if (size > PAGE_SIZE)
                        goto drop;

                page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
                if (!page)
                        goto drop;

                head = page_address(page);
                start = head + VETH_XDP_HEADROOM;
                if (skb_copy_bits(skb, -mac_len, start, pktlen)) {
                        page_frag_free(head);
                        goto drop;
                }

                nskb = veth_build_skb(head, VETH_XDP_HEADROOM + mac_len,
                                      skb->len, PAGE_SIZE);
                if (!nskb) {
                        page_frag_free(head);
                        goto drop;
                }

                skb_copy_header(nskb, skb);
                head_off = skb_headroom(nskb) - skb_headroom(skb);
                skb_headers_offset_update(nskb, head_off);
                consume_skb(skb);
                skb = nskb;
        }

        xdp.data_hard_start = skb->head;
        xdp.data = skb_mac_header(skb);
        xdp.data_end = xdp.data + pktlen;
        xdp.data_meta = xdp.data;
        xdp.rxq = &rq->xdp_rxq;

        /* The SKB "head" area always has tailroom for skb_shared_info */
        xdp.frame_sz = (void *)skb_end_pointer(skb) - xdp.data_hard_start;
        xdp.frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        orig_data = xdp.data;
        orig_data_end = xdp.data_end;

        act = bpf_prog_run_xdp(xdp_prog, &xdp);

        switch (act) {
        case XDP_PASS:
                break;
        case XDP_TX:
                get_page(virt_to_page(xdp.data));
                consume_skb(skb);
                xdp.rxq->mem = rq->xdp_mem;
                if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
                        trace_xdp_exception(rq->dev, xdp_prog, act);
                        stats->rx_drops++;
                        goto err_xdp;
                }
                stats->xdp_tx++;
                rcu_read_unlock();
                goto xdp_xmit;
        case XDP_REDIRECT:
                get_page(virt_to_page(xdp.data));
                consume_skb(skb);
                xdp.rxq->mem = rq->xdp_mem;
                if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
                        stats->rx_drops++;
                        goto err_xdp;
                }
                stats->xdp_redirect++;
                rcu_read_unlock();
                goto xdp_xmit;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
                trace_xdp_exception(rq->dev, xdp_prog, act);
                fallthrough;
        case XDP_DROP:
                stats->xdp_drops++;
                goto xdp_drop;
        }
        rcu_read_unlock();

        /* check if bpf_xdp_adjust_head was used */
        delta = orig_data - xdp.data;
        off = mac_len + delta;
        if (off > 0)
                __skb_push(skb, off);
        else if (off < 0)
                __skb_pull(skb, -off);
        skb->mac_header -= delta;

        /* check if bpf_xdp_adjust_tail was used */
        off = xdp.data_end - orig_data_end;
        if (off != 0)
                __skb_put(skb, off); /* positive on grow, negative on shrink */
        skb->protocol = eth_type_trans(skb, rq->dev);

        metalen = xdp.data - xdp.data_meta;
        if (metalen)
                skb_metadata_set(skb, metalen);
out:
        return skb;
drop:
        stats->rx_drops++;
xdp_drop:
        rcu_read_unlock();
        kfree_skb(skb);
        return NULL;
err_xdp:
        rcu_read_unlock();
        page_frag_free(xdp.data);
xdp_xmit:
        return NULL;
}

static int veth_xdp_rcv(struct veth_rq *rq, int budget,
                        struct veth_xdp_tx_bq *bq,
                        struct veth_stats *stats)
{
        int i, done = 0;

        for (i = 0; i < budget; i++) {
                void *ptr = __ptr_ring_consume(&rq->xdp_ring);
                struct sk_buff *skb;

                if (!ptr)
                        break;

                if (veth_is_xdp_frame(ptr)) {
                        struct xdp_frame *frame = veth_ptr_to_xdp(ptr);

                        stats->xdp_bytes += frame->len;
                        skb = veth_xdp_rcv_one(rq, frame, bq, stats);
                } else {
                        skb = ptr;
                        stats->xdp_bytes += skb->len;
                        skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
                }

                if (skb)
                        napi_gro_receive(&rq->xdp_napi, skb);

                done++;
        }

        u64_stats_update_begin(&rq->stats.syncp);
        rq->stats.vs.xdp_redirect += stats->xdp_redirect;
        rq->stats.vs.xdp_bytes += stats->xdp_bytes;
        rq->stats.vs.xdp_drops += stats->xdp_drops;
        rq->stats.vs.rx_drops += stats->rx_drops;
        rq->stats.vs.xdp_packets += done;
        u64_stats_update_end(&rq->stats.syncp);

        return done;
}

static int veth_poll(struct napi_struct *napi, int budget)
{
        struct veth_rq *rq =
                container_of(napi, struct veth_rq, xdp_napi);
        struct veth_stats stats = {};
        struct veth_xdp_tx_bq bq;
        int done;

        bq.count = 0;

        xdp_set_return_frame_no_direct();
        done = veth_xdp_rcv(rq, budget, &bq, &stats);

        if (done < budget && napi_complete_done(napi, done)) {
                /* Write rx_notify_masked before reading ptr_ring */
                smp_store_mb(rq->rx_notify_masked, false);
                if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
                        rq->rx_notify_masked = true;
                        napi_schedule(&rq->xdp_napi);
                }
        }

        if (stats.xdp_tx > 0)
                veth_xdp_flush(rq, &bq);
        if (stats.xdp_redirect > 0)
                xdp_do_flush();
        xdp_clear_return_frame_no_direct();

        return done;
}
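
/*
 * Re-checking the ring after napi_complete_done() closes the race with
 * a producer that enqueued after the ring was drained but before
 * rx_notify_masked was cleared; see the pairing barrier comment in
 * __veth_xdp_flush().
 */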

static int veth_napi_add(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        int err, i;

        for (i = 0; i < dev->real_num_rx_queues; i++) {
                struct veth_rq *rq = &priv->rq[i];

                err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
                if (err)
                        goto err_xdp_ring;
        }

        for (i = 0; i < dev->real_num_rx_queues; i++) {
                struct veth_rq *rq = &priv->rq[i];

                napi_enable(&rq->xdp_napi);
        }

        return 0;
err_xdp_ring:
        for (i--; i >= 0; i--)
                ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);

        return err;
}

static void veth_napi_del(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        int i;

        for (i = 0; i < dev->real_num_rx_queues; i++) {
                struct veth_rq *rq = &priv->rq[i];

                napi_disable(&rq->xdp_napi);
                __netif_napi_del(&rq->xdp_napi);
        }
        synchronize_net();

        for (i = 0; i < dev->real_num_rx_queues; i++) {
                struct veth_rq *rq = &priv->rq[i];

                rq->rx_notify_masked = false;
                ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
        }
}

static int veth_enable_xdp(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        int err, i;

        if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
                for (i = 0; i < dev->real_num_rx_queues; i++) {
                        struct veth_rq *rq = &priv->rq[i];

                        netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
                        err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
                        if (err < 0)
                                goto err_rxq_reg;

                        err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
                                                         MEM_TYPE_PAGE_SHARED,
                                                         NULL);
                        if (err < 0)
                                goto err_reg_mem;

                        /* Save original mem info as it can be overwritten */
                        rq->xdp_mem = rq->xdp_rxq.mem;
                }

                err = veth_napi_add(dev);
                if (err)
                        goto err_rxq_reg;
        }

        for (i = 0; i < dev->real_num_rx_queues; i++)
                rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog);

        return 0;
err_reg_mem:
        xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
err_rxq_reg:
        for (i--; i >= 0; i--) {
                struct veth_rq *rq = &priv->rq[i];

                xdp_rxq_info_unreg(&rq->xdp_rxq);
                netif_napi_del(&rq->xdp_napi);
        }

        return err;
}

static void veth_disable_xdp(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        int i;

        for (i = 0; i < dev->real_num_rx_queues; i++)
                rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
        veth_napi_del(dev);
        for (i = 0; i < dev->real_num_rx_queues; i++) {
                struct veth_rq *rq = &priv->rq[i];

                rq->xdp_rxq.mem = rq->xdp_mem;
                xdp_rxq_info_unreg(&rq->xdp_rxq);
        }
}

static int veth_open(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct net_device *peer = rtnl_dereference(priv->peer);
        int err;

        if (!peer)
                return -ENOTCONN;

        if (priv->_xdp_prog) {
                err = veth_enable_xdp(dev);
                if (err)
                        return err;
        }

        if (peer->flags & IFF_UP) {
                netif_carrier_on(dev);
                netif_carrier_on(peer);
        }

        return 0;
}

static int veth_close(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct net_device *peer = rtnl_dereference(priv->peer);

        netif_carrier_off(dev);
        if (peer)
                netif_carrier_off(peer);

        if (priv->_xdp_prog)
                veth_disable_xdp(dev);

        return 0;
}

static int is_valid_veth_mtu(int mtu)
{
        return mtu >= ETH_MIN_MTU && mtu <= ETH_MAX_MTU;
}

static int veth_alloc_queues(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        int i;

        priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL);
        if (!priv->rq)
                return -ENOMEM;

        for (i = 0; i < dev->num_rx_queues; i++) {
                priv->rq[i].dev = dev;
                u64_stats_init(&priv->rq[i].stats.syncp);
        }

        return 0;
}

static void veth_free_queues(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);

        kfree(priv->rq);
}

static int veth_dev_init(struct net_device *dev)
{
        int err;

        dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
        if (!dev->lstats)
                return -ENOMEM;

        err = veth_alloc_queues(dev);
        if (err) {
                free_percpu(dev->lstats);
                return err;
        }

        return 0;
}

static void veth_dev_free(struct net_device *dev)
{
        veth_free_queues(dev);
        free_percpu(dev->lstats);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void veth_poll_controller(struct net_device *dev)
{
        /* veth only receives frames when its peer sends one.
         * Since that has nothing to do with disabling irqs, we are
         * guaranteed never to have pending data when we poll for it, so
         * there is nothing to do here.
         *
         * We need this though so netpoll recognizes us as an interface that
         * supports polling, which enables bridge devices in virt setups to
         * still use netconsole.
         */
}
#endif  /* CONFIG_NET_POLL_CONTROLLER */

static int veth_get_iflink(const struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct net_device *peer;
        int iflink;

        rcu_read_lock();
        peer = rcu_dereference(priv->peer);
        iflink = peer ? peer->ifindex : 0;
        rcu_read_unlock();

        return iflink;
}

static netdev_features_t veth_fix_features(struct net_device *dev,
                                           netdev_features_t features)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct net_device *peer;

        peer = rtnl_dereference(priv->peer);
        if (peer) {
                struct veth_priv *peer_priv = netdev_priv(peer);

                if (peer_priv->_xdp_prog)
                        features &= ~NETIF_F_GSO_SOFTWARE;
        }

        return features;
}

static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
{
        struct veth_priv *peer_priv, *priv = netdev_priv(dev);
        struct net_device *peer;

        if (new_hr < 0)
                new_hr = 0;

        rcu_read_lock();
        peer = rcu_dereference(priv->peer);
        if (unlikely(!peer))
                goto out;

        peer_priv = netdev_priv(peer);
        priv->requested_headroom = new_hr;
        new_hr = max(priv->requested_headroom, peer_priv->requested_headroom);
        dev->needed_headroom = new_hr;
        peer->needed_headroom = new_hr;

out:
        rcu_read_unlock();
}

static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
                        struct netlink_ext_ack *extack)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct bpf_prog *old_prog;
        struct net_device *peer;
        unsigned int max_mtu;
        int err;

        old_prog = priv->_xdp_prog;
        priv->_xdp_prog = prog;
        peer = rtnl_dereference(priv->peer);

        if (prog) {
                if (!peer) {
                        NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached");
                        err = -ENOTCONN;
                        goto err;
                }

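                /* With XDP attached, each frame must fit in one page
                 * together with its metadata.  As a rough example, with 4K
                 * pages, 256 + 2 bytes of VETH_XDP_HEADROOM, a 14-byte
                 * Ethernet header and a ~320-byte skb_shared_info, max_mtu
                 * comes out a bit above 3500 bytes; the exact value is
                 * architecture dependent.
                 */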
                max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
                          peer->hard_header_len -
                          SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                if (peer->mtu > max_mtu) {
                        NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
                        err = -ERANGE;
                        goto err;
                }

                if (dev->real_num_rx_queues < peer->real_num_tx_queues) {
                        NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues");
                        err = -ENOSPC;
                        goto err;
                }

                if (dev->flags & IFF_UP) {
                        err = veth_enable_xdp(dev);
                        if (err) {
                                NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed");
                                goto err;
                        }
                }

                if (!old_prog) {
                        peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
                        peer->max_mtu = max_mtu;
                }
        }

        if (old_prog) {
                if (!prog) {
                        if (dev->flags & IFF_UP)
                                veth_disable_xdp(dev);

                        if (peer) {
                                peer->hw_features |= NETIF_F_GSO_SOFTWARE;
                                peer->max_mtu = ETH_MAX_MTU;
                        }
                }
                bpf_prog_put(old_prog);
        }

        if ((!!old_prog ^ !!prog) && peer)
                netdev_update_features(peer);

        return 0;
err:
        priv->_xdp_prog = old_prog;

        return err;
}

static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
        switch (xdp->command) {
        case XDP_SETUP_PROG:
                return veth_xdp_set(dev, xdp->prog, xdp->extack);
        default:
                return -EINVAL;
        }
}

static const struct net_device_ops veth_netdev_ops = {
        .ndo_init            = veth_dev_init,
        .ndo_open            = veth_open,
        .ndo_stop            = veth_close,
        .ndo_start_xmit      = veth_xmit,
        .ndo_get_stats64     = veth_get_stats64,
        .ndo_set_rx_mode     = veth_set_multicast_list,
        .ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = veth_poll_controller,
#endif
        .ndo_get_iflink         = veth_get_iflink,
        .ndo_fix_features       = veth_fix_features,
        .ndo_features_check     = passthru_features_check,
        .ndo_set_rx_headroom    = veth_set_rx_headroom,
        .ndo_bpf                = veth_xdp,
        .ndo_xdp_xmit           = veth_ndo_xdp_xmit,
        .ndo_get_peer_dev       = veth_peer_dev,
};

#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
                       NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \
                       NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
                       NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
                       NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX)

static void veth_setup(struct net_device *dev)
{
        ether_setup(dev);

        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        dev->priv_flags |= IFF_NO_QUEUE;
        dev->priv_flags |= IFF_PHONY_HEADROOM;

        dev->netdev_ops = &veth_netdev_ops;
        dev->ethtool_ops = &veth_ethtool_ops;
        dev->features |= NETIF_F_LLTX;
        dev->features |= VETH_FEATURES;
        dev->vlan_features = dev->features &
                             ~(NETIF_F_HW_VLAN_CTAG_TX |
                               NETIF_F_HW_VLAN_STAG_TX |
                               NETIF_F_HW_VLAN_CTAG_RX |
                               NETIF_F_HW_VLAN_STAG_RX);
        dev->needs_free_netdev = true;
        dev->priv_destructor = veth_dev_free;
        dev->max_mtu = ETH_MAX_MTU;

        dev->hw_features = VETH_FEATURES;
        dev->hw_enc_features = VETH_FEATURES;
        dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
}

/*
 * netlink interface
 */

static int veth_validate(struct nlattr *tb[], struct nlattr *data[],
                         struct netlink_ext_ack *extack)
{
        if (tb[IFLA_ADDRESS]) {
                if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
                        return -EINVAL;
                if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
                        return -EADDRNOTAVAIL;
        }
        if (tb[IFLA_MTU]) {
                if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
                        return -EINVAL;
        }
        return 0;
}

static struct rtnl_link_ops veth_link_ops;

static int veth_newlink(struct net *src_net, struct net_device *dev,
                        struct nlattr *tb[], struct nlattr *data[],
                        struct netlink_ext_ack *extack)
{
        int err;
        struct net_device *peer;
        struct veth_priv *priv;
        char ifname[IFNAMSIZ];
        struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
        unsigned char name_assign_type;
        struct ifinfomsg *ifmp;
        struct net *net;

        /*
         * create and register the peer first
         */
        if (data != NULL && data[VETH_INFO_PEER] != NULL) {
                struct nlattr *nla_peer;

                nla_peer = data[VETH_INFO_PEER];
                ifmp = nla_data(nla_peer);
                err = rtnl_nla_parse_ifla(peer_tb,
                                          nla_data(nla_peer) + sizeof(struct ifinfomsg),
                                          nla_len(nla_peer) - sizeof(struct ifinfomsg),
                                          NULL);
                if (err < 0)
                        return err;

                err = veth_validate(peer_tb, NULL, extack);
                if (err < 0)
                        return err;

                tbp = peer_tb;
        } else {
                ifmp = NULL;
                tbp = tb;
        }

        if (ifmp && tbp[IFLA_IFNAME]) {
                nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
                name_assign_type = NET_NAME_USER;
        } else {
                snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
                name_assign_type = NET_NAME_ENUM;
        }

        net = rtnl_link_get_net(src_net, tbp);
        if (IS_ERR(net))
                return PTR_ERR(net);

        peer = rtnl_create_link(net, ifname, name_assign_type,
                                &veth_link_ops, tbp, extack);
        if (IS_ERR(peer)) {
                put_net(net);
                return PTR_ERR(peer);
        }

        if (!ifmp || !tbp[IFLA_ADDRESS])
                eth_hw_addr_random(peer);

        if (ifmp && (dev->ifindex != 0))
                peer->ifindex = ifmp->ifi_index;

        peer->gso_max_size = dev->gso_max_size;
        peer->gso_max_segs = dev->gso_max_segs;

        err = register_netdevice(peer);
        put_net(net);
        net = NULL;
        if (err < 0)
                goto err_register_peer;

        netif_carrier_off(peer);

        err = rtnl_configure_link(peer, ifmp);
        if (err < 0)
                goto err_configure_peer;

        /*
         * register dev last
         *
         * note that, since we've registered a new device, the dev's name
         * should be re-allocated
         */

        if (tb[IFLA_ADDRESS] == NULL)
                eth_hw_addr_random(dev);

        if (tb[IFLA_IFNAME])
                nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
        else
                snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

        err = register_netdevice(dev);
        if (err < 0)
                goto err_register_dev;

        netif_carrier_off(dev);

        /*
         * tie the devices together
         */

        priv = netdev_priv(dev);
        rcu_assign_pointer(priv->peer, peer);

        priv = netdev_priv(peer);
        rcu_assign_pointer(priv->peer, dev);

        return 0;

err_register_dev:
        /* nothing to do */
err_configure_peer:
        unregister_netdevice(peer);
        return err;

err_register_peer:
        free_netdev(peer);
        return err;
}

static void veth_dellink(struct net_device *dev, struct list_head *head)
{
        struct veth_priv *priv;
        struct net_device *peer;

        priv = netdev_priv(dev);
        peer = rtnl_dereference(priv->peer);

        /* Note: dellink() is called from default_device_exit_batch(),
         * before an rcu_synchronize() point. The devices are guaranteed
         * not to be freed before one RCU grace period has elapsed.
         */
        RCU_INIT_POINTER(priv->peer, NULL);
        unregister_netdevice_queue(dev, head);

        if (peer) {
                priv = netdev_priv(peer);
                RCU_INIT_POINTER(priv->peer, NULL);
                unregister_netdevice_queue(peer, head);
        }
}

static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
        [VETH_INFO_PEER]        = { .len = sizeof(struct ifinfomsg) },
};

static struct net *veth_get_link_net(const struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct net_device *peer = rtnl_dereference(priv->peer);

        return peer ? dev_net(peer) : dev_net(dev);
}

static struct rtnl_link_ops veth_link_ops = {
        .kind           = DRV_NAME,
        .priv_size      = sizeof(struct veth_priv),
        .setup          = veth_setup,
        .validate       = veth_validate,
        .newlink        = veth_newlink,
        .dellink        = veth_dellink,
        .policy         = veth_policy,
        .maxtype        = VETH_INFO_MAX,
        .get_link_net   = veth_get_link_net,
};
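
/*
 * Typical userspace usage maps onto veth_newlink() above; with
 * iproute2, for example:
 *
 *      ip link add veth0 type veth peer name veth1
 *      ip link set veth1 netns <namespace>
 *
 * where the "peer name" arguments travel as the nested VETH_INFO_PEER
 * attribute parsed at the top of veth_newlink().
 */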

/*
 * init/fini
 */

static __init int veth_init(void)
{
        return rtnl_link_register(&veth_link_ops);
}

static __exit void veth_exit(void)
{
        rtnl_link_unregister(&veth_link_ops);
}

module_init(veth_init);
module_exit(veth_exit);

MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);