drivers/net/virtio_net.c

// SPDX-License-Identifier: GPL-2.0-or-later
/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <net/route.h>
#include <net/xdp.h>
#include <net/net_failover.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true, napi_tx = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
module_param(napi_tx, bool, 0644);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN   128

#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
#define VIRTIO_XDP_HEADROOM 256

/* Separating two types of XDP xmit */
#define VIRTIO_XDP_TX           BIT(0)
#define VIRTIO_XDP_REDIR        BIT(1)

#define VIRTIO_XDP_FLAG BIT(0)

/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to short-
 * term, transient changes in packet size.
 */
DECLARE_EWMA(pkt_len, 0, 64)

#define VIRTNET_DRIVER_VERSION "1.0.0"

static const unsigned long guest_offloads[] = {
        VIRTIO_NET_F_GUEST_TSO4,
        VIRTIO_NET_F_GUEST_TSO6,
        VIRTIO_NET_F_GUEST_ECN,
        VIRTIO_NET_F_GUEST_UFO,
        VIRTIO_NET_F_GUEST_CSUM
};

#define GUEST_OFFLOAD_LRO_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
                                (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
                                (1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
                                (1ULL << VIRTIO_NET_F_GUEST_UFO))

struct virtnet_stat_desc {
        char desc[ETH_GSTRING_LEN];
        size_t offset;
};

struct virtnet_sq_stats {
        struct u64_stats_sync syncp;
        u64 packets;
        u64 bytes;
        u64 xdp_tx;
        u64 xdp_tx_drops;
        u64 kicks;
};

struct virtnet_rq_stats {
        struct u64_stats_sync syncp;
        u64 packets;
        u64 bytes;
        u64 drops;
        u64 xdp_packets;
        u64 xdp_tx;
        u64 xdp_redirects;
        u64 xdp_drops;
        u64 kicks;
};

#define VIRTNET_SQ_STAT(m)      offsetof(struct virtnet_sq_stats, m)
#define VIRTNET_RQ_STAT(m)      offsetof(struct virtnet_rq_stats, m)

static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
        { "packets",            VIRTNET_SQ_STAT(packets) },
        { "bytes",              VIRTNET_SQ_STAT(bytes) },
        { "xdp_tx",             VIRTNET_SQ_STAT(xdp_tx) },
        { "xdp_tx_drops",       VIRTNET_SQ_STAT(xdp_tx_drops) },
        { "kicks",              VIRTNET_SQ_STAT(kicks) },
};

static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
        { "packets",            VIRTNET_RQ_STAT(packets) },
        { "bytes",              VIRTNET_RQ_STAT(bytes) },
        { "drops",              VIRTNET_RQ_STAT(drops) },
        { "xdp_packets",        VIRTNET_RQ_STAT(xdp_packets) },
        { "xdp_tx",             VIRTNET_RQ_STAT(xdp_tx) },
        { "xdp_redirects",      VIRTNET_RQ_STAT(xdp_redirects) },
        { "xdp_drops",          VIRTNET_RQ_STAT(xdp_drops) },
        { "kicks",              VIRTNET_RQ_STAT(kicks) },
};

#define VIRTNET_SQ_STATS_LEN    ARRAY_SIZE(virtnet_sq_stats_desc)
#define VIRTNET_RQ_STATS_LEN    ARRAY_SIZE(virtnet_rq_stats_desc)

/* Internal representation of a send virtqueue */
struct send_queue {
        /* Virtqueue associated with this send_queue */
        struct virtqueue *vq;

        /* TX: fragments + linear part + virtio header */
        struct scatterlist sg[MAX_SKB_FRAGS + 2];

        /* Name of the send queue: output.$index */
        char name[40];

        struct virtnet_sq_stats stats;

        struct napi_struct napi;
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
        /* Virtqueue associated with this receive_queue */
        struct virtqueue *vq;

        struct napi_struct napi;

        struct bpf_prog __rcu *xdp_prog;

        struct virtnet_rq_stats stats;

        /* Chain pages by the private ptr. */
        struct page *pages;

        /* Average packet length for mergeable receive buffers. */
        struct ewma_pkt_len mrg_avg_pkt_len;

        /* Page frag for packet buffer allocation. */
        struct page_frag alloc_frag;

        /* RX: fragments + linear part + virtio header */
        struct scatterlist sg[MAX_SKB_FRAGS + 2];

        /* Min single buffer size for mergeable buffers case. */
        unsigned int min_buf_len;

        /* Name of this receive queue: input.$index */
        char name[40];

        struct xdp_rxq_info xdp_rxq;
};

/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
        struct virtio_net_ctrl_hdr hdr;
        virtio_net_ctrl_ack status;
        struct virtio_net_ctrl_mq mq;
        u8 promisc;
        u8 allmulti;
        __virtio16 vid;
        __virtio64 offloads;
};

struct virtnet_info {
        struct virtio_device *vdev;
        struct virtqueue *cvq;
        struct net_device *dev;
        struct send_queue *sq;
        struct receive_queue *rq;
        unsigned int status;

        /* Max # of queue pairs supported by the device */
        u16 max_queue_pairs;

        /* # of queue pairs currently used by the driver */
        u16 curr_queue_pairs;

        /* # of XDP queue pairs currently used by the driver */
        u16 xdp_queue_pairs;

        /* xdp_queue_pairs may be 0 while XDP is loaded, so track XDP state with a separate flag. */
        bool xdp_enabled;

        /* I like... big packets and I cannot lie! */
        bool big_packets;

        /* Host will merge rx buffers for big packets (shake it! shake it!) */
        bool mergeable_rx_bufs;

        /* Has control virtqueue */
        bool has_cvq;

        /* Host can handle any s/g split between our header and packet data */
        bool any_header_sg;

        /* Packet virtio header size */
        u8 hdr_len;

        /* Work struct for refilling if we run low on memory. */
        struct delayed_work refill;

        /* Work struct for config space updates */
        struct work_struct config_work;

        /* Is the affinity hint set for virtqueues? */
        bool affinity_hint_set;

        /* CPU hotplug instances for online & dead */
        struct hlist_node node;
        struct hlist_node node_dead;

        struct control_buf *ctrl;

        /* Ethtool settings */
        u8 duplex;
        u32 speed;

        unsigned long guest_offloads;
        unsigned long guest_offloads_capable;

        /* failover when STANDBY feature enabled */
        struct failover *failover;
};

struct padded_vnet_hdr {
        struct virtio_net_hdr_mrg_rxbuf hdr;
        /*
         * hdr is in a separate sg buffer, and the data sg buffer shares a
         * page with this header sg. This padding makes the next sg 16-byte
         * aligned after the header.
         */
        char padding[4];
};

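/* The token returned by the TX virtqueue is either a regular sk_buff or a
 * tagged xdp_frame pointer. Pointer values here are at least word aligned,
 * so the low bit (VIRTIO_XDP_FLAG) is free to mark XDP frames; the helpers
 * below set, test and clear that tag.
 */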
static bool is_xdp_frame(void *ptr)
{
        return (unsigned long)ptr & VIRTIO_XDP_FLAG;
}

static void *xdp_to_ptr(struct xdp_frame *ptr)
{
        return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
}

static struct xdp_frame *ptr_to_xdp(void *ptr)
{
        return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
}

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
        return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
        return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
        return vq->index / 2;
}

static int rxq2vq(int rxq)
{
        return rxq * 2;
}

static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
{
        return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
}

/*
 * private is used to chain pages for big packets; put the whole
 * most recently used list at the beginning for reuse
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
        struct page *end;

        /* Find end of list, sew whole thing into vi->rq.pages. */
        for (end = page; end->private; end = (struct page *)end->private);
        end->private = (unsigned long)rq->pages;
        rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
        struct page *p = rq->pages;

        if (p) {
                rq->pages = (struct page *)p->private;
                /* clear private here, it is used to chain pages */
                p->private = 0;
        } else
                p = alloc_page(gfp_mask);
        return p;
}

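/* Schedule NAPI for a virtqueue, first suppressing the virtqueue's
 * callbacks so further completions are handled by the poll loop rather
 * than by interrupts.
 */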
static void virtqueue_napi_schedule(struct napi_struct *napi,
                                    struct virtqueue *vq)
{
        if (napi_schedule_prep(napi)) {
                virtqueue_disable_cb(vq);
                __napi_schedule(napi);
        }
}

static void virtqueue_napi_complete(struct napi_struct *napi,
                                    struct virtqueue *vq, int processed)
{
        int opaque;

        opaque = virtqueue_enable_cb_prepare(vq);
        if (napi_complete_done(napi, processed)) {
                if (unlikely(virtqueue_poll(vq, opaque)))
                        virtqueue_napi_schedule(napi, vq);
        } else {
                virtqueue_disable_cb(vq);
        }
}

static void skb_xmit_done(struct virtqueue *vq)
{
        struct virtnet_info *vi = vq->vdev->priv;
        struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;

        /* Suppress further interrupts. */
        virtqueue_disable_cb(vq);

        if (napi->weight)
                virtqueue_napi_schedule(napi, vq);
        else
                /* We were probably waiting for more output buffers. */
                netif_wake_subqueue(vi->dev, vq2txq(vq));
}

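/* The mergeable-buffer context packs two values into an opaque pointer:
 * the buffer's truesize in the low MRG_CTX_HEADER_SHIFT bits and its
 * headroom in the remaining high bits.
 */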
#define MRG_CTX_HEADER_SHIFT 22
static void *mergeable_len_to_ctx(unsigned int truesize,
                                  unsigned int headroom)
{
        return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
}

static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
{
        return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
}

static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
{
        return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
                                   struct receive_queue *rq,
                                   struct page *page, unsigned int offset,
                                   unsigned int len, unsigned int truesize,
                                   bool hdr_valid, unsigned int metasize,
                                   bool whole_page)
{
        struct sk_buff *skb;
        struct virtio_net_hdr_mrg_rxbuf *hdr;
        unsigned int copy, hdr_len, hdr_padded_len;
        struct page *page_to_free = NULL;
        int tailroom, shinfo_size;
        char *p, *hdr_p, *buf;

        p = page_address(page) + offset;
        hdr_p = p;

        hdr_len = vi->hdr_len;
        if (vi->mergeable_rx_bufs)
                hdr_padded_len = sizeof(*hdr);
        else
                hdr_padded_len = sizeof(struct padded_vnet_hdr);

        /* If whole_page, there is an offset between the beginning of the
         * data and the allocated space, otherwise the data and the allocated
         * space are aligned.
         */
        if (whole_page) {
                /* Buffers with whole_page use PAGE_SIZE as alloc size,
                 * see add_recvbuf_mergeable() + get_mergeable_buf_len()
                 */
                truesize = PAGE_SIZE;

                /* page may be a head page, so we must locate buf via p,
                 * not via the page pointer
                 */
                tailroom = truesize - len - offset_in_page(p);
                buf = (char *)((unsigned long)p & PAGE_MASK);
        } else {
                tailroom = truesize - len;
                buf = p;
        }

        len -= hdr_len;
        offset += hdr_padded_len;
        p += hdr_padded_len;

        shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        /* If there is sufficient tailroom, build the skb around the
         * existing buffer and avoid a copy.
         */
        if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
                skb = build_skb(buf, truesize);
                if (unlikely(!skb))
                        return NULL;

                skb_reserve(skb, p - buf);
                skb_put(skb, len);
                goto ok;
        }

        /* copy small packet so we can reuse these pages for small data */
        skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
        if (unlikely(!skb))
                return NULL;

        /* Copy the entire frame if it fits in skb->head; otherwise
         * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
         */
        if (len <= skb_tailroom(skb))
                copy = len;
        else
                copy = ETH_HLEN + metasize;
        skb_put_data(skb, p, copy);

        len -= copy;
        offset += copy;

        if (vi->mergeable_rx_bufs) {
                if (len)
                        skb_add_rx_frag(skb, 0, page, offset, len, truesize);
                else
                        page_to_free = page;
                goto ok;
        }

        /*
         * Verify that we can indeed put this data into a skb.
         * This is here to handle cases when the device erroneously
         * tries to receive more than is possible. This is usually
         * the case of a broken device.
         */
        if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
                net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
                dev_kfree_skb(skb);
                return NULL;
        }
        BUG_ON(offset >= PAGE_SIZE);
        while (len) {
                unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
                                frag_size, truesize);
                len -= frag_size;
                page = (struct page *)page->private;
                offset = 0;
        }

        if (page)
                give_pages(rq, page);

ok:
        /* hdr_valid means no XDP, so we can copy the vnet header */
        if (hdr_valid) {
                hdr = skb_vnet_hdr(skb);
                memcpy(hdr, hdr_p, hdr_len);
        }
        if (page_to_free)
                put_page(page_to_free);

        if (metasize) {
                __skb_pull(skb, metasize);
                skb_metadata_set(skb, metasize);
        }

        return skb;
}

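/* Prepend a zeroed virtio-net header to an XDP frame and queue it on the
 * send virtqueue, tagging the token so TX completion can tell it apart
 * from an sk_buff.
 */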
static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
                                   struct send_queue *sq,
                                   struct xdp_frame *xdpf)
{
        struct virtio_net_hdr_mrg_rxbuf *hdr;
        int err;

        if (unlikely(xdpf->headroom < vi->hdr_len))
                return -EOVERFLOW;

        /* Make room for virtqueue hdr (also change xdpf->headroom?) */
        xdpf->data -= vi->hdr_len;
        /* Zero header and leave csum up to XDP layers */
        hdr = xdpf->data;
        memset(hdr, 0, vi->hdr_len);
        xdpf->len   += vi->hdr_len;

        sg_init_one(sq->sg, xdpf->data, xdpf->len);

        err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
                                   GFP_ATOMIC);
        if (unlikely(err))
                return -ENOSPC; /* Caller handles free/refcnt */

        return 0;
}

/* When vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for XDP tx
 * on the current cpu, so it does not need to be locked.
 *
 * Here we use a macro instead of inline functions because we have to deal
 * with three issues at the same time: 1. the choice of sq. 2. deciding
 * whether to lock/unlock the txq. 3. keeping sparse happy. It is difficult
 * for two inline functions to solve these three problems at the same time.
 */
#define virtnet_xdp_get_sq(vi) ({                                       \
        struct netdev_queue *txq;                                       \
        typeof(vi) v = (vi);                                            \
        unsigned int qp;                                                \
                                                                        \
        if (v->curr_queue_pairs > nr_cpu_ids) {                         \
                qp = v->curr_queue_pairs - v->xdp_queue_pairs;          \
                qp += smp_processor_id();                               \
                txq = netdev_get_tx_queue(v->dev, qp);                  \
                __netif_tx_acquire(txq);                                \
        } else {                                                        \
                qp = smp_processor_id() % v->curr_queue_pairs;          \
                txq = netdev_get_tx_queue(v->dev, qp);                  \
                __netif_tx_lock(txq, raw_smp_processor_id());           \
        }                                                               \
        v->sq + qp;                                                     \
})

#define virtnet_xdp_put_sq(vi, q) {                                     \
        struct netdev_queue *txq;                                       \
        typeof(vi) v = (vi);                                            \
                                                                        \
        txq = netdev_get_tx_queue(v->dev, (q) - v->sq);                 \
        if (v->curr_queue_pairs > nr_cpu_ids)                           \
                __netif_tx_release(txq);                                \
        else                                                            \
                __netif_tx_unlock(txq);                                 \
}

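/* ndo_xdp_xmit handler: reclaim completed TX buffers, then queue up to n
 * XDP frames on the send virtqueue, kicking the device only when the
 * caller passes XDP_XMIT_FLUSH. Returns the number of frames queued, or
 * a negative errno.
 */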
static int virtnet_xdp_xmit(struct net_device *dev,
                            int n, struct xdp_frame **frames, u32 flags)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct receive_queue *rq = vi->rq;
        struct bpf_prog *xdp_prog;
        struct send_queue *sq;
        unsigned int len;
        int packets = 0;
        int bytes = 0;
        int nxmit = 0;
        int kicks = 0;
        void *ptr;
        int ret;
        int i;

        /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
         * indicates XDP resources have been successfully allocated.
         */
        xdp_prog = rcu_access_pointer(rq->xdp_prog);
        if (!xdp_prog)
                return -ENXIO;

        sq = virtnet_xdp_get_sq(vi);

        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
                ret = -EINVAL;
                goto out;
        }

        /* Free up any pending old buffers before queueing new ones. */
        while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
                if (likely(is_xdp_frame(ptr))) {
                        struct xdp_frame *frame = ptr_to_xdp(ptr);

                        bytes += frame->len;
                        xdp_return_frame(frame);
                } else {
                        struct sk_buff *skb = ptr;

                        bytes += skb->len;
                        napi_consume_skb(skb, false);
                }
                packets++;
        }

        for (i = 0; i < n; i++) {
                struct xdp_frame *xdpf = frames[i];

                if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
                        break;
                nxmit++;
        }
        ret = nxmit;

        if (flags & XDP_XMIT_FLUSH) {
                if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
                        kicks = 1;
        }
out:
        u64_stats_update_begin(&sq->stats.syncp);
        sq->stats.bytes += bytes;
        sq->stats.packets += packets;
        sq->stats.xdp_tx += n;
        sq->stats.xdp_tx_drops += n - nxmit;
        sq->stats.kicks += kicks;
        u64_stats_update_end(&sq->stats.syncp);

        virtnet_xdp_put_sq(vi, sq);
        return ret;
}

static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
        return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
}

/* We copy the packet for XDP in the following cases:
 *
 * 1) Packet is scattered across multiple rx buffers.
 * 2) Headroom space is insufficient.
 *
 * This is inefficient but it's a temporary condition that we hit right
 * after XDP is enabled and until the queue is refilled with large buffers
 * with sufficient headroom - so it should affect at most queue-size packets.
 * Afterwards, the conditions to enable XDP should preclude the underlying
 * device from sending packets across multiple buffers (num_buf > 1), and we
 * make sure buffers have enough headroom.
 */
static struct page *xdp_linearize_page(struct receive_queue *rq,
                                       u16 *num_buf,
                                       struct page *p,
                                       int offset,
                                       int page_off,
                                       unsigned int *len)
{
        struct page *page = alloc_page(GFP_ATOMIC);

        if (!page)
                return NULL;

        memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
        page_off += *len;

        while (--*num_buf) {
                int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                unsigned int buflen;
                void *buf;
                int off;

                buf = virtqueue_get_buf(rq->vq, &buflen);
                if (unlikely(!buf))
                        goto err_buf;

                p = virt_to_head_page(buf);
                off = buf - page_address(p);

                /* guard against a misconfigured or uncooperative backend that
                 * is sending packets larger than the MTU.
                 */
                if ((page_off + buflen + tailroom) > PAGE_SIZE) {
                        put_page(p);
                        goto err_buf;
                }

                memcpy(page_address(page) + page_off,
                       page_address(p) + off, buflen);
                page_off += buflen;
                put_page(p);
        }

        /* Headroom does not contribute to packet length */
        *len = page_off - VIRTIO_XDP_HEADROOM;
        return page;
err_buf:
        __free_pages(page, 0);
        return NULL;
}

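/* Receive path for small (single-buffer) packets: run the attached XDP
 * program, if any, and then build an skb around the receive buffer. The
 * vnet header is copied only when no XDP program is loaded; otherwise it
 * is left zeroed.
 */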
static struct sk_buff *receive_small(struct net_device *dev,
                                     struct virtnet_info *vi,
                                     struct receive_queue *rq,
                                     void *buf, void *ctx,
                                     unsigned int len,
                                     unsigned int *xdp_xmit,
                                     struct virtnet_rq_stats *stats)
{
        struct sk_buff *skb;
        struct bpf_prog *xdp_prog;
        unsigned int xdp_headroom = (unsigned long)ctx;
        unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
        unsigned int headroom = vi->hdr_len + header_offset;
        unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
                              SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        struct page *page = virt_to_head_page(buf);
        unsigned int delta = 0;
        struct page *xdp_page;
        int err;
        unsigned int metasize = 0;

        len -= vi->hdr_len;
        stats->bytes += len;

        rcu_read_lock();
        xdp_prog = rcu_dereference(rq->xdp_prog);
        if (xdp_prog) {
                struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
                struct xdp_frame *xdpf;
                struct xdp_buff xdp;
                void *orig_data;
                u32 act;

                if (unlikely(hdr->hdr.gso_type))
                        goto err_xdp;

                if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
                        int offset = buf - page_address(page) + header_offset;
                        unsigned int tlen = len + vi->hdr_len;
                        u16 num_buf = 1;

                        xdp_headroom = virtnet_get_headroom(vi);
                        header_offset = VIRTNET_RX_PAD + xdp_headroom;
                        headroom = vi->hdr_len + header_offset;
                        buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
                                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                        xdp_page = xdp_linearize_page(rq, &num_buf, page,
                                                      offset, header_offset,
                                                      &tlen);
                        if (!xdp_page)
                                goto err_xdp;

                        buf = page_address(xdp_page);
                        put_page(page);
                        page = xdp_page;
                }

                xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
                xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
                                 xdp_headroom, len, true);
                orig_data = xdp.data;
                act = bpf_prog_run_xdp(xdp_prog, &xdp);
                stats->xdp_packets++;

                switch (act) {
                case XDP_PASS:
                        /* Recalculate length in case bpf program changed it */
                        delta = orig_data - xdp.data;
                        len = xdp.data_end - xdp.data;
                        metasize = xdp.data - xdp.data_meta;
                        break;
                case XDP_TX:
                        stats->xdp_tx++;
                        xdpf = xdp_convert_buff_to_frame(&xdp);
                        if (unlikely(!xdpf))
                                goto err_xdp;
                        err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
                        if (unlikely(!err)) {
                                xdp_return_frame_rx_napi(xdpf);
                        } else if (unlikely(err < 0)) {
                                trace_xdp_exception(vi->dev, xdp_prog, act);
                                goto err_xdp;
                        }
                        *xdp_xmit |= VIRTIO_XDP_TX;
                        rcu_read_unlock();
                        goto xdp_xmit;
                case XDP_REDIRECT:
                        stats->xdp_redirects++;
                        err = xdp_do_redirect(dev, &xdp, xdp_prog);
                        if (err)
                                goto err_xdp;
                        *xdp_xmit |= VIRTIO_XDP_REDIR;
                        rcu_read_unlock();
                        goto xdp_xmit;
                default:
                        bpf_warn_invalid_xdp_action(act);
                        fallthrough;
                case XDP_ABORTED:
                        trace_xdp_exception(vi->dev, xdp_prog, act);
                        goto err_xdp;
                case XDP_DROP:
                        goto err_xdp;
                }
        }
        rcu_read_unlock();

        skb = build_skb(buf, buflen);
        if (!skb) {
                put_page(page);
                goto err;
        }
        skb_reserve(skb, headroom - delta);
        skb_put(skb, len);
        if (!xdp_prog) {
                buf += header_offset;
                memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
        } /* keep zeroed vnet hdr since XDP is loaded */

        if (metasize)
                skb_metadata_set(skb, metasize);

err:
        return skb;

err_xdp:
        rcu_read_unlock();
        stats->xdp_drops++;
        stats->drops++;
        put_page(page);
xdp_xmit:
        return NULL;
}

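/* Receive path for "big" packets: the buffer is a chain of pages, so hand
 * the whole chain to page_to_skb() and return the pages on error.
 */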
static struct sk_buff *receive_big(struct net_device *dev,
                                   struct virtnet_info *vi,
                                   struct receive_queue *rq,
                                   void *buf,
                                   unsigned int len,
                                   struct virtnet_rq_stats *stats)
{
        struct page *page = buf;
        struct sk_buff *skb =
                page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0, 0);

        stats->bytes += len - vi->hdr_len;
        if (unlikely(!skb))
                goto err;

        return skb;

err:
        stats->drops++;
        give_pages(rq, page);
        return NULL;
}

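/* Receive path for mergeable buffers: a packet may span num_buffers
 * descriptors. Run the attached XDP program, if any, on the (possibly
 * linearized) head buffer, then assemble the remaining buffers into the
 * skb as frags.
 */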
static struct sk_buff *receive_mergeable(struct net_device *dev,
                                         struct virtnet_info *vi,
                                         struct receive_queue *rq,
                                         void *buf,
                                         void *ctx,
                                         unsigned int len,
                                         unsigned int *xdp_xmit,
                                         struct virtnet_rq_stats *stats)
{
        struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
        u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
        struct page *page = virt_to_head_page(buf);
        int offset = buf - page_address(page);
        struct sk_buff *head_skb, *curr_skb;
        struct bpf_prog *xdp_prog;
        unsigned int truesize = mergeable_ctx_to_truesize(ctx);
        unsigned int headroom = mergeable_ctx_to_headroom(ctx);
        unsigned int metasize = 0;
        unsigned int frame_sz;
        int err;

        head_skb = NULL;
        stats->bytes += len - vi->hdr_len;

        rcu_read_lock();
        xdp_prog = rcu_dereference(rq->xdp_prog);
        if (xdp_prog) {
                struct xdp_frame *xdpf;
                struct page *xdp_page;
                struct xdp_buff xdp;
                void *data;
                u32 act;

                /* Transient failure which in theory could occur if
                 * in-flight packets from before XDP was enabled reach
                 * the receive path after XDP is loaded.
                 */
                if (unlikely(hdr->hdr.gso_type))
                        goto err_xdp;

                /* Buffers with headroom use PAGE_SIZE as alloc size,
                 * see add_recvbuf_mergeable() + get_mergeable_buf_len()
                 */
                frame_sz = headroom ? PAGE_SIZE : truesize;

                /* This happens when the rx buffer size is underestimated
                 * or the headroom is not enough because the buffer was
                 * refilled before XDP was set. This should only happen
                 * for the first several packets, so we don't care much
                 * about its performance.
                 */
                if (unlikely(num_buf > 1 ||
                             headroom < virtnet_get_headroom(vi))) {
                        /* linearize data for XDP */
                        xdp_page = xdp_linearize_page(rq, &num_buf,
                                                      page, offset,
                                                      VIRTIO_XDP_HEADROOM,
                                                      &len);
                        frame_sz = PAGE_SIZE;

                        if (!xdp_page)
                                goto err_xdp;
                        offset = VIRTIO_XDP_HEADROOM;
                } else {
                        xdp_page = page;
                }

                /* Allow consuming headroom but reserve enough space to push
                 * the descriptor on if we get an XDP_TX return code.
                 */
                data = page_address(xdp_page) + offset;
                xdp_init_buff(&xdp, frame_sz - vi->hdr_len, &rq->xdp_rxq);
                xdp_prepare_buff(&xdp, data - VIRTIO_XDP_HEADROOM + vi->hdr_len,
                                 VIRTIO_XDP_HEADROOM, len - vi->hdr_len, true);

                act = bpf_prog_run_xdp(xdp_prog, &xdp);
                stats->xdp_packets++;

                switch (act) {
                case XDP_PASS:
                        metasize = xdp.data - xdp.data_meta;

                        /* recalculate offset to account for any header
                         * adjustments, and subtract the metasize so that
                         * page_to_skb() copies the metadata. Note the other
                         * cases do not build an skb and avoid using offset
                         */
                        offset = xdp.data - page_address(xdp_page) -
                                 vi->hdr_len - metasize;

                        /* recalculate len if xdp.data, xdp.data_end or
                         * xdp.data_meta were adjusted
                         */
                        len = xdp.data_end - xdp.data + vi->hdr_len + metasize;
                        /* We can only create skb based on xdp_page. */
                        if (unlikely(xdp_page != page)) {
                                rcu_read_unlock();
                                put_page(page);
                                head_skb = page_to_skb(vi, rq, xdp_page, offset,
                                                       len, PAGE_SIZE, false,
                                                       metasize, true);
                                return head_skb;
                        }
                        break;
                case XDP_TX:
                        stats->xdp_tx++;
                        xdpf = xdp_convert_buff_to_frame(&xdp);
                        if (unlikely(!xdpf))
                                goto err_xdp;
                        err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
                        if (unlikely(!err)) {
                                xdp_return_frame_rx_napi(xdpf);
                        } else if (unlikely(err < 0)) {
                                trace_xdp_exception(vi->dev, xdp_prog, act);
                                if (unlikely(xdp_page != page))
                                        put_page(xdp_page);
                                goto err_xdp;
                        }
                        *xdp_xmit |= VIRTIO_XDP_TX;
                        if (unlikely(xdp_page != page))
                                put_page(page);
                        rcu_read_unlock();
                        goto xdp_xmit;
                case XDP_REDIRECT:
                        stats->xdp_redirects++;
                        err = xdp_do_redirect(dev, &xdp, xdp_prog);
                        if (err) {
                                if (unlikely(xdp_page != page))
                                        put_page(xdp_page);
                                goto err_xdp;
                        }
                        *xdp_xmit |= VIRTIO_XDP_REDIR;
                        if (unlikely(xdp_page != page))
                                put_page(page);
                        rcu_read_unlock();
                        goto xdp_xmit;
                default:
                        bpf_warn_invalid_xdp_action(act);
                        fallthrough;
                case XDP_ABORTED:
                        trace_xdp_exception(vi->dev, xdp_prog, act);
                        fallthrough;
                case XDP_DROP:
                        if (unlikely(xdp_page != page))
                                __free_pages(xdp_page, 0);
                        goto err_xdp;
                }
        }
        rcu_read_unlock();

        if (unlikely(len > truesize)) {
                pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
                         dev->name, len, (unsigned long)ctx);
                dev->stats.rx_length_errors++;
                goto err_skb;
        }

        head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
                               metasize, !!headroom);
        curr_skb = head_skb;

        if (unlikely(!curr_skb))
                goto err_skb;
        while (--num_buf) {
                int num_skb_frags;

                buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
                if (unlikely(!buf)) {
                        pr_debug("%s: rx error: %d buffers out of %d missing\n",
                                 dev->name, num_buf,
                                 virtio16_to_cpu(vi->vdev,
                                                 hdr->num_buffers));
                        dev->stats.rx_length_errors++;
                        goto err_buf;
                }

                stats->bytes += len;
                page = virt_to_head_page(buf);

                truesize = mergeable_ctx_to_truesize(ctx);
                if (unlikely(len > truesize)) {
                        pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
                                 dev->name, len, (unsigned long)ctx);
                        dev->stats.rx_length_errors++;
                        goto err_skb;
                }

                num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
                if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
                        struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);

                        if (unlikely(!nskb))
                                goto err_skb;
                        if (curr_skb == head_skb)
                                skb_shinfo(curr_skb)->frag_list = nskb;
                        else
                                curr_skb->next = nskb;
                        curr_skb = nskb;
                        head_skb->truesize += nskb->truesize;
                        num_skb_frags = 0;
                }
                if (curr_skb != head_skb) {
                        head_skb->data_len += len;
                        head_skb->len += len;
                        head_skb->truesize += truesize;
                }
                offset = buf - page_address(page);
                if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
                        put_page(page);
                        skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
                                             len, truesize);
                } else {
                        skb_add_rx_frag(curr_skb, num_skb_frags, page,
                                        offset, len, truesize);
                }
        }

        ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
        return head_skb;

err_xdp:
        rcu_read_unlock();
        stats->xdp_drops++;
err_skb:
        put_page(page);
        while (num_buf-- > 1) {
                buf = virtqueue_get_buf(rq->vq, &len);
                if (unlikely(!buf)) {
                        pr_debug("%s: rx error: %d buffers missing\n",
                                 dev->name, num_buf);
                        dev->stats.rx_length_errors++;
                        break;
                }
                stats->bytes += len;
                page = virt_to_head_page(buf);
                put_page(page);
        }
err_buf:
        stats->drops++;
        dev_kfree_skb(head_skb);
xdp_xmit:
        return NULL;
}

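/* Common receive entry point: validate the length, dispatch to the
 * mergeable/big/small receive path, then finish the skb (checksum hint
 * and GSO metadata from the vnet header) and hand it to GRO.
 */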
static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
                        void *buf, unsigned int len, void **ctx,
                        unsigned int *xdp_xmit,
                        struct virtnet_rq_stats *stats)
{
        struct net_device *dev = vi->dev;
        struct sk_buff *skb;
        struct virtio_net_hdr_mrg_rxbuf *hdr;

        if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
                pr_debug("%s: short packet %i\n", dev->name, len);
                dev->stats.rx_length_errors++;
                if (vi->mergeable_rx_bufs) {
                        put_page(virt_to_head_page(buf));
                } else if (vi->big_packets) {
                        give_pages(rq, buf);
                } else {
                        put_page(virt_to_head_page(buf));
                }
                return;
        }

        if (vi->mergeable_rx_bufs)
                skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
                                        stats);
        else if (vi->big_packets)
                skb = receive_big(dev, vi, rq, buf, len, stats);
        else
                skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);

        if (unlikely(!skb))
                return;

        hdr = skb_vnet_hdr(skb);

        if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
                                  virtio_is_little_endian(vi->vdev))) {
                net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
                                     dev->name, hdr->hdr.gso_type,
                                     hdr->hdr.gso_size);
                goto frame_err;
        }

        skb_record_rx_queue(skb, vq2rxq(rq->vq));
        skb->protocol = eth_type_trans(skb, dev);
        pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
                 ntohs(skb->protocol), skb->len, skb->pkt_type);

        napi_gro_receive(&rq->napi, skb);
        return;

frame_err:
        dev->stats.rx_frame_errors++;
        dev_kfree_skb(skb);
}

/* Unlike mergeable buffers, all buffers here are allocated to the same
 * size, except for the headroom. For this reason we do not need to use
 * mergeable_len_to_ctx here - it is enough to store the headroom as the
 * context, ignoring the truesize.
 */
static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
                             gfp_t gfp)
{
        struct page_frag *alloc_frag = &rq->alloc_frag;
        char *buf;
        unsigned int xdp_headroom = virtnet_get_headroom(vi);
        void *ctx = (void *)(unsigned long)xdp_headroom;
        int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
        int err;

        len = SKB_DATA_ALIGN(len) +
              SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
                return -ENOMEM;

        buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
        get_page(alloc_frag->page);
        alloc_frag->offset += len;
        sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom,
                    vi->hdr_len + GOOD_PACKET_LEN);
        err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
        if (err < 0)
                put_page(virt_to_head_page(buf));
        return err;
}

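/* Post a "big" receive buffer: a chain of MAX_SKB_FRAGS + 1 pages, with a
 * separate sg entry for the virtio header, linked together through
 * page->private so the chain can be reclaimed with give_pages().
 */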
static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
                           gfp_t gfp)
{
        struct page *first, *list = NULL;
        char *p;
        int i, err, offset;

        sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);

        /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
        for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
                first = get_a_page(rq, gfp);
                if (!first) {
                        if (list)
                                give_pages(rq, list);
                        return -ENOMEM;
                }
                sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

                /* chain new page in list head to match sg */
                first->private = (unsigned long)list;
                list = first;
        }

        first = get_a_page(rq, gfp);
        if (!first) {
                give_pages(rq, list);
                return -ENOMEM;
        }
        p = page_address(first);

        /* rq->sg[0], rq->sg[1] share the same page */
        /* a separate rq->sg[0] for header - required in case !any_header_sg */
        sg_set_buf(&rq->sg[0], p, vi->hdr_len);

        /* rq->sg[1] for data packet, from offset */
        offset = sizeof(struct padded_vnet_hdr);
        sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

        /* chain first in list head */
        first->private = (unsigned long)list;
        err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
                                  first, gfp);
        if (err < 0)
                give_pages(rq, first);

        return err;
}

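/* Pick the refill buffer size for mergeable buffers: the EWMA of recent
 * packet lengths, clamped between the device minimum and a page, and
 * cache-line aligned. When XDP headroom is in play (room != 0), the whole
 * page is used instead.
 */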
static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
                                          struct ewma_pkt_len *avg_pkt_len,
                                          unsigned int room)
{
        const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        unsigned int len;

        if (room)
                return PAGE_SIZE - room;

        len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
                                rq->min_buf_len, PAGE_SIZE - hdr_len);

        return ALIGN(len, L1_CACHE_BYTES);
}

static int add_recvbuf_mergeable(struct virtnet_info *vi,
                                 struct receive_queue *rq, gfp_t gfp)
{
        struct page_frag *alloc_frag = &rq->alloc_frag;
        unsigned int headroom = virtnet_get_headroom(vi);
        unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
        unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
        char *buf;
        void *ctx;
        int err;
        unsigned int len, hole;

        /* Extra tailroom is needed to satisfy XDP's assumption. This
         * means rx frag coalescing won't work, but since we've disabled
         * GSO for XDP it won't be a big issue.
         */
        len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
        if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
                return -ENOMEM;

        buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
        buf += headroom; /* advance address leaving hole at front of pkt */
        get_page(alloc_frag->page);
        alloc_frag->offset += len + room;
        hole = alloc_frag->size - alloc_frag->offset;
        if (hole < len + room) {
                /* To avoid internal fragmentation, if there is very likely not
                 * enough space for another buffer, add the remaining space to
                 * the current buffer.
                 */
                len += hole;
                alloc_frag->offset += hole;
        }

        sg_init_one(rq->sg, buf, len);
        ctx = mergeable_len_to_ctx(len, headroom);
        err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
        if (err < 0)
                put_page(virt_to_head_page(buf));

        return err;
}

/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
                          gfp_t gfp)
{
        int err;
        bool oom;

        do {
                if (vi->mergeable_rx_bufs)
                        err = add_recvbuf_mergeable(vi, rq, gfp);
                else if (vi->big_packets)
                        err = add_recvbuf_big(vi, rq, gfp);
                else
                        err = add_recvbuf_small(vi, rq, gfp);

                oom = err == -ENOMEM;
                if (err)
                        break;
        } while (rq->vq->num_free);
        if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
                unsigned long flags;

                flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
                rq->stats.kicks++;
                u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
        }

        return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
        struct virtnet_info *vi = rvq->vdev->priv;
        struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

        virtqueue_napi_schedule(&rq->napi, rvq);
}

static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
{
        napi_enable(napi);

        /* If all buffers were filled by the other side before we enabled
         * napi, we won't get another interrupt, so process any outstanding
         * packets now. Calling local_bh_enable afterwards triggers softIRQ
         * processing.
         */
        local_bh_disable();
        virtqueue_napi_schedule(napi, vq);
        local_bh_enable();
}

static void virtnet_napi_tx_enable(struct virtnet_info *vi,
                                   struct virtqueue *vq,
                                   struct napi_struct *napi)
{
        if (!napi->weight)
                return;

        /* Tx napi touches cachelines on the cpu handling tx interrupts. Only
         * enable the feature if this is likely affine with the transmit path.
         */
        if (!vi->affinity_hint_set) {
                napi->weight = 0;
                return;
        }

        return virtnet_napi_enable(vq, napi);
}

static void virtnet_napi_tx_disable(struct napi_struct *napi)
{
        if (napi->weight)
                napi_disable(napi);
}

static void refill_work(struct work_struct *work)
{
        struct virtnet_info *vi =
                container_of(work, struct virtnet_info, refill.work);
        bool still_empty;
        int i;

        for (i = 0; i < vi->curr_queue_pairs; i++) {
                struct receive_queue *rq = &vi->rq[i];

                napi_disable(&rq->napi);
                still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
                virtnet_napi_enable(rq->vq, &rq->napi);

                /* In theory, this can happen: if we don't manage to add any
                 * buffers we will *never* try to fill again.
                 */
                if (still_empty)
                        schedule_delayed_work(&vi->refill, HZ/2);
        }
}

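/* NAPI receive core: drain up to budget completed buffers from the receive
 * virtqueue, refill the ring if it is running low, and fold the per-call
 * counters into the queue's persistent statistics.
 */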
static int virtnet_receive(struct receive_queue *rq, int budget,
                           unsigned int *xdp_xmit)
{
        struct virtnet_info *vi = rq->vq->vdev->priv;
        struct virtnet_rq_stats stats = {};
        unsigned int len;
        void *buf;
        int i;

        if (!vi->big_packets || vi->mergeable_rx_bufs) {
                void *ctx;

                while (stats.packets < budget &&
                       (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
                        receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
                        stats.packets++;
                }
        } else {
                while (stats.packets < budget &&
                       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
                        receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
                        stats.packets++;
                }
        }

        if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
                if (!try_fill_recv(vi, rq, GFP_ATOMIC))
                        schedule_delayed_work(&vi->refill, 0);
        }

        u64_stats_update_begin(&rq->stats.syncp);
        for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
                size_t offset = virtnet_rq_stats_desc[i].offset;
                u64 *item;

                item = (u64 *)((u8 *)&rq->stats + offset);
                *item += *(u64 *)((u8 *)&stats + offset);
        }
        u64_stats_update_end(&rq->stats.syncp);

        return stats.packets;
}

1453 static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
1454 {
1455         unsigned int len;
1456         unsigned int packets = 0;
1457         unsigned int bytes = 0;
1458         void *ptr;
1459
1460         while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
1461                 if (likely(!is_xdp_frame(ptr))) {
1462                         struct sk_buff *skb = ptr;
1463
1464                         pr_debug("Sent skb %p\n", skb);
1465
1466                         bytes += skb->len;
1467                         napi_consume_skb(skb, in_napi);
1468                 } else {
1469                         struct xdp_frame *frame = ptr_to_xdp(ptr);
1470
1471                         bytes += frame->len;
1472                         xdp_return_frame(frame);
1473                 }
1474                 packets++;
1475         }
1476
1477         /* Avoid overhead when no packets have been processed; this
1478          * happens when called speculatively from start_xmit.
1479          */
1480         if (!packets)
1481                 return;
1482
1483         u64_stats_update_begin(&sq->stats.syncp);
1484         sq->stats.bytes += bytes;
1485         sq->stats.packets += packets;
1486         u64_stats_update_end(&sq->stats.syncp);
1487 }
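
/* Note (added for clarity): the TX ring carries two token types, told apart
 * by a low pointer-tag bit (VIRTIO_XDP_FLAG, see is_xdp_frame() and
 * ptr_to_xdp()): ordinary skbs, released via napi_consume_skb(), and
 * xdp_frames queued by XDP_TX/XDP_REDIRECT, released via xdp_return_frame().
 */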
1488
1489 static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
1490 {
1491         if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
1492                 return false;
1493         else if (q < vi->curr_queue_pairs)
1494                 return true;
1495         else
1496                 return false;
1497 }
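
/* Worked example (illustrative figures): with curr_queue_pairs == 4 and
 * xdp_queue_pairs == 2, queues 0 and 1 carry ordinary skbs, queues 2 and 3
 * are XDP raw-buffer queues, and any index >= curr_queue_pairs is outside
 * the active range, hence false.
 */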
1498
1499 static void virtnet_poll_cleantx(struct receive_queue *rq)
1500 {
1501         struct virtnet_info *vi = rq->vq->vdev->priv;
1502         unsigned int index = vq2rxq(rq->vq);
1503         struct send_queue *sq = &vi->sq[index];
1504         struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
1505
1506         if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
1507                 return;
1508
1509         if (__netif_tx_trylock(txq)) {
1510                 free_old_xmit_skbs(sq, true);
1511                 __netif_tx_unlock(txq);
1512         }
1513
1514         if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
1515                 netif_tx_wake_queue(txq);
1516 }
1517
1518 static int virtnet_poll(struct napi_struct *napi, int budget)
1519 {
1520         struct receive_queue *rq =
1521                 container_of(napi, struct receive_queue, napi);
1522         struct virtnet_info *vi = rq->vq->vdev->priv;
1523         struct send_queue *sq;
1524         unsigned int received;
1525         unsigned int xdp_xmit = 0;
1526
1527         virtnet_poll_cleantx(rq);
1528
1529         received = virtnet_receive(rq, budget, &xdp_xmit);
1530
1531         /* Out of packets? */
1532         if (received < budget)
1533                 virtqueue_napi_complete(napi, rq->vq, received);
1534
1535         if (xdp_xmit & VIRTIO_XDP_REDIR)
1536                 xdp_do_flush();
1537
1538         if (xdp_xmit & VIRTIO_XDP_TX) {
1539                 sq = virtnet_xdp_get_sq(vi);
1540                 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
1541                         u64_stats_update_begin(&sq->stats.syncp);
1542                         sq->stats.kicks++;
1543                         u64_stats_update_end(&sq->stats.syncp);
1544                 }
1545                 virtnet_xdp_put_sq(vi, sq);
1546         }
1547
1548         return received;
1549 }
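
/* Note (added for clarity): xdp_xmit is a bitmask filled in by
 * virtnet_receive(). VIRTIO_XDP_REDIR means packets were redirected, so the
 * per-CPU bulk queues must be flushed via xdp_do_flush(); VIRTIO_XDP_TX
 * means frames were queued on our own XDP send queue, which then gets a
 * single batched kick here rather than one kick per frame.
 */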
1550
1551 static int virtnet_open(struct net_device *dev)
1552 {
1553         struct virtnet_info *vi = netdev_priv(dev);
1554         int i, err;
1555
1556         for (i = 0; i < vi->max_queue_pairs; i++) {
1557                 if (i < vi->curr_queue_pairs)
1558                         /* Make sure we have some buffers: if OOM, retry from the workqueue. */
1559                         if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
1560                                 schedule_delayed_work(&vi->refill, 0);
1561
1562                 err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i, vi->rq[i].napi.napi_id);
1563                 if (err < 0)
1564                         return err;
1565
1566                 err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq,
1567                                                  MEM_TYPE_PAGE_SHARED, NULL);
1568                 if (err < 0) {
1569                         xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
1570                         return err;
1571                 }
1572
1573                 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
1574                 virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
1575         }
1576
1577         return 0;
1578 }
1579
1580 static int virtnet_poll_tx(struct napi_struct *napi, int budget)
1581 {
1582         struct send_queue *sq = container_of(napi, struct send_queue, napi);
1583         struct virtnet_info *vi = sq->vq->vdev->priv;
1584         unsigned int index = vq2txq(sq->vq);
1585         struct netdev_queue *txq;
1586
1587         if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
1588                 /* We don't need to enable cb for XDP */
1589                 napi_complete_done(napi, 0);
1590                 return 0;
1591         }
1592
1593         txq = netdev_get_tx_queue(vi->dev, index);
1594         __netif_tx_lock(txq, raw_smp_processor_id());
1595         free_old_xmit_skbs(sq, true);
1596         __netif_tx_unlock(txq);
1597
1598         virtqueue_napi_complete(napi, sq->vq, 0);
1599
1600         if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
1601                 netif_tx_wake_queue(txq);
1602
1603         return 0;
1604 }
1605
1606 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
1607 {
1608         struct virtio_net_hdr_mrg_rxbuf *hdr;
1609         const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
1610         struct virtnet_info *vi = sq->vq->vdev->priv;
1611         int num_sg;
1612         unsigned int hdr_len = vi->hdr_len;
1613         bool can_push;
1614
1615         pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
1616
1617         can_push = vi->any_header_sg &&
1618                 !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
1619                 !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
1620         /* Even if we can, don't push here yet as this would skew
1621          * csum_start offset below. */
1622         if (can_push)
1623                 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
1624         else
1625                 hdr = skb_vnet_hdr(skb);
1626
1627         if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
1628                                     virtio_is_little_endian(vi->vdev), false,
1629                                     0))
1630                 BUG();
1631
1632         if (vi->mergeable_rx_bufs)
1633                 hdr->num_buffers = 0;
1634
1635         sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
1636         if (can_push) {
1637                 __skb_push(skb, hdr_len);
1638                 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
1639                 if (unlikely(num_sg < 0))
1640                         return num_sg;
1641                 /* Pull header back to avoid skew in tx bytes calculations. */
1642                 __skb_pull(skb, hdr_len);
1643         } else {
1644                 sg_set_buf(sq->sg, hdr, hdr_len);
1645                 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
1646                 if (unlikely(num_sg < 0))
1647                         return num_sg;
1648                 num_sg++;
1649         }
1650         return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
1651 }
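
/* Note (added for clarity): when can_push holds, the virtio header is
 * written into the skb's own headroom, so header plus data map onto one
 * scatterlist chain; otherwise the header lives in the out-of-band
 * skb_vnet_hdr() area and costs an extra sg entry (hence the
 * "can_push ? 1 : 2" above).
 */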
1652
1653 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
1654 {
1655         struct virtnet_info *vi = netdev_priv(dev);
1656         int qnum = skb_get_queue_mapping(skb);
1657         struct send_queue *sq = &vi->sq[qnum];
1658         int err;
1659         struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
1660         bool kick = !netdev_xmit_more();
1661         bool use_napi = sq->napi.weight;
1662
1663         /* Free up any pending old buffers before queueing new ones. */
1664         free_old_xmit_skbs(sq, false);
1665
1666         if (use_napi && kick)
1667                 virtqueue_enable_cb_delayed(sq->vq);
1668
1669         /* timestamp packet in software */
1670         skb_tx_timestamp(skb);
1671
1672         /* Try to transmit */
1673         err = xmit_skb(sq, skb);
1674
1675         /* This should not happen! */
1676         if (unlikely(err)) {
1677                 dev->stats.tx_fifo_errors++;
1678                 if (net_ratelimit())
1679                         dev_warn(&dev->dev,
1680                                  "Unexpected TXQ (%d) queue failure: %d\n",
1681                                  qnum, err);
1682                 dev->stats.tx_dropped++;
1683                 dev_kfree_skb_any(skb);
1684                 return NETDEV_TX_OK;
1685         }
1686
1687         /* Don't wait up for transmitted skbs to be freed. */
1688         if (!use_napi) {
1689                 skb_orphan(skb);
1690                 nf_reset_ct(skb);
1691         }
1692
1693         /* If running out of space, stop queue to avoid getting packets that we
1694          * are then unable to transmit.
1695          * An alternative would be to force the queueing layer to requeue the
1696          * skb by returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not
1697          * be returned in a normal path of operation: it means that the driver
1698          * is not maintaining the TX queue stop/start state properly, and causes
1699          * the stack to do a non-trivial amount of useless work.
1700          * Since most packets only take 1 or 2 ring slots, stopping the queue
1701          * early means 16 slots are typically wasted.
1702          */
1703         if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
1704                 netif_stop_subqueue(dev, qnum);
1705                 if (!use_napi &&
1706                     unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
1707                         /* More just got used, free them then recheck. */
1708                         free_old_xmit_skbs(sq, false);
1709                         if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
1710                                 netif_start_subqueue(dev, qnum);
1711                                 virtqueue_disable_cb(sq->vq);
1712                         }
1713                 }
1714         }
1715
1716         if (kick || netif_xmit_stopped(txq)) {
1717                 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
1718                         u64_stats_update_begin(&sq->stats.syncp);
1719                         sq->stats.kicks++;
1720                         u64_stats_update_end(&sq->stats.syncp);
1721                 }
1722         }
1723
1724         return NETDEV_TX_OK;
1725 }
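
/* Worked example (assuming 4 KiB pages, where MAX_SKB_FRAGS == 17): the
 * queue is stopped once fewer than 2 + 17 = 19 descriptors remain, i.e.
 * one for the virtio header, one for the linear area and one per possible
 * fragment of a worst-case skb; it is woken again from the cleanup paths
 * once at least that much headroom is back.
 */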
1726
1727 /*
1728  * Send command via the control virtqueue and check status.  Commands
1729  * supported by the hypervisor, as indicated by feature bits, should
1730  * never fail unless improperly formatted.
1731  */
1732 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
1733                                  struct scatterlist *out)
1734 {
1735         struct scatterlist *sgs[4], hdr, stat;
1736         unsigned int out_num = 0, tmp;
1737
1738         /* Caller should know better */
1739         BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
1740
1741         vi->ctrl->status = ~0;
1742         vi->ctrl->hdr.class = class;
1743         vi->ctrl->hdr.cmd = cmd;
1744         /* Add header */
1745         sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
1746         sgs[out_num++] = &hdr;
1747
1748         if (out)
1749                 sgs[out_num++] = out;
1750
1751         /* Add return status. */
1752         sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
1753         sgs[out_num] = &stat;
1754
1755         BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
1756         virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
1757
1758         if (unlikely(!virtqueue_kick(vi->cvq)))
1759                 return vi->ctrl->status == VIRTIO_NET_OK;
1760
1761         /* Spin for a response; the kick causes an ioport write, trapping
1762          * into the hypervisor, so the request should be handled immediately.
1763          */
1764         while (!virtqueue_get_buf(vi->cvq, &tmp) &&
1765                !virtqueue_is_broken(vi->cvq))
1766                 cpu_relax();
1767
1768         return vi->ctrl->status == VIRTIO_NET_OK;
1769 }
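
/* Example call (sketch; it mirrors the real call sites below):
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
 *	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
 *				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg))
 *		return -EINVAL;
 *
 * sgs[] is laid out as { header (out), optional payload (out), status (in) };
 * out_num counts only the driver-to-device entries, and the status entry is
 * always the final device-to-driver one.
 */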
1770
1771 static int virtnet_set_mac_address(struct net_device *dev, void *p)
1772 {
1773         struct virtnet_info *vi = netdev_priv(dev);
1774         struct virtio_device *vdev = vi->vdev;
1775         int ret;
1776         struct sockaddr *addr;
1777         struct scatterlist sg;
1778
1779         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
1780                 return -EOPNOTSUPP;
1781
1782         addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
1783         if (!addr)
1784                 return -ENOMEM;
1785
1786         ret = eth_prepare_mac_addr_change(dev, addr);
1787         if (ret)
1788                 goto out;
1789
1790         if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
1791                 sg_init_one(&sg, addr->sa_data, dev->addr_len);
1792                 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
1793                                           VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
1794                         dev_warn(&vdev->dev,
1795                                  "Failed to set mac address by vq command.\n");
1796                         ret = -EINVAL;
1797                         goto out;
1798                 }
1799         } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
1800                    !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
1801                 unsigned int i;
1802
1803                 /* Naturally, this has an atomicity problem. */
1804                 for (i = 0; i < dev->addr_len; i++)
1805                         virtio_cwrite8(vdev,
1806                                        offsetof(struct virtio_net_config, mac) +
1807                                        i, addr->sa_data[i]);
1808         }
1809
1810         eth_commit_mac_addr_change(dev, p);
1811         ret = 0;
1812
1813 out:
1814         kfree(addr);
1815         return ret;
1816 }
1817
1818 static void virtnet_stats(struct net_device *dev,
1819                           struct rtnl_link_stats64 *tot)
1820 {
1821         struct virtnet_info *vi = netdev_priv(dev);
1822         unsigned int start;
1823         int i;
1824
1825         for (i = 0; i < vi->max_queue_pairs; i++) {
1826                 u64 tpackets, tbytes, rpackets, rbytes, rdrops;
1827                 struct receive_queue *rq = &vi->rq[i];
1828                 struct send_queue *sq = &vi->sq[i];
1829
1830                 do {
1831                         start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
1832                         tpackets = sq->stats.packets;
1833                         tbytes   = sq->stats.bytes;
1834                 } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
1835
1836                 do {
1837                         start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
1838                         rpackets = rq->stats.packets;
1839                         rbytes   = rq->stats.bytes;
1840                         rdrops   = rq->stats.drops;
1841                 } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
1842
1843                 tot->rx_packets += rpackets;
1844                 tot->tx_packets += tpackets;
1845                 tot->rx_bytes   += rbytes;
1846                 tot->tx_bytes   += tbytes;
1847                 tot->rx_dropped += rdrops;
1848         }
1849
1850         tot->tx_dropped = dev->stats.tx_dropped;
1851         tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
1852         tot->rx_length_errors = dev->stats.rx_length_errors;
1853         tot->rx_frame_errors = dev->stats.rx_frame_errors;
1854 }
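
/* Note (added for clarity): the u64_stats_fetch_begin/retry loops give
 * tear-free 64-bit snapshots of each queue's counters on 32-bit machines;
 * on 64-bit builds the sync is compiled away and each loop runs once.
 */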
1855
1856 static void virtnet_ack_link_announce(struct virtnet_info *vi)
1857 {
1858         rtnl_lock();
1859         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
1860                                   VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
1861                 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
1862         rtnl_unlock();
1863 }
1864
1865 static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
1866 {
1867         struct scatterlist sg;
1868         struct net_device *dev = vi->dev;
1869
1870         if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
1871                 return 0;
1872
1873         vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
1874         sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
1875
1876         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
1877                                   VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
1878                 dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n",
1879                          queue_pairs);
1880                 return -EINVAL;
1881         } else {
1882                 vi->curr_queue_pairs = queue_pairs;
1883                 /* virtnet_open() will refill when the device is brought up. */
1884                 if (dev->flags & IFF_UP)
1885                         schedule_delayed_work(&vi->refill, 0);
1886         }
1887
1888         return 0;
1889 }
1890
1891 static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
1892 {
1893         int err;
1894
1895         rtnl_lock();
1896         err = _virtnet_set_queues(vi, queue_pairs);
1897         rtnl_unlock();
1898         return err;
1899 }
1900
1901 static int virtnet_close(struct net_device *dev)
1902 {
1903         struct virtnet_info *vi = netdev_priv(dev);
1904         int i;
1905
1906         /* Make sure refill_work doesn't re-enable napi! */
1907         cancel_delayed_work_sync(&vi->refill);
1908
1909         for (i = 0; i < vi->max_queue_pairs; i++) {
1910                 xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
1911                 napi_disable(&vi->rq[i].napi);
1912                 virtnet_napi_tx_disable(&vi->sq[i].napi);
1913         }
1914
1915         return 0;
1916 }
1917
1918 static void virtnet_set_rx_mode(struct net_device *dev)
1919 {
1920         struct virtnet_info *vi = netdev_priv(dev);
1921         struct scatterlist sg[2];
1922         struct virtio_net_ctrl_mac *mac_data;
1923         struct netdev_hw_addr *ha;
1924         int uc_count;
1925         int mc_count;
1926         void *buf;
1927         int i;
1928
1929         /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
1930         if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
1931                 return;
1932
1933         vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
1934         vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
1935
1936         sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
1937
1938         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
1939                                   VIRTIO_NET_CTRL_RX_PROMISC, sg))
1940                 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
1941                          vi->ctrl->promisc ? "en" : "dis");
1942
1943         sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
1944
1945         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
1946                                   VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
1947                 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
1948                          vi->ctrl->allmulti ? "en" : "dis");
1949
1950         uc_count = netdev_uc_count(dev);
1951         mc_count = netdev_mc_count(dev);
1952         /* MAC filter - use one buffer for both lists */
1953         buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
1954                       (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
1955         mac_data = buf;
1956         if (!buf)
1957                 return;
1958
1959         sg_init_table(sg, 2);
1960
1961         /* Store the unicast list and count in the front of the buffer */
1962         mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
1963         i = 0;
1964         netdev_for_each_uc_addr(ha, dev)
1965                 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
1966
1967         sg_set_buf(&sg[0], mac_data,
1968                    sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
1969
1970         /* multicast list and count fill the end */
1971         mac_data = (void *)&mac_data->macs[uc_count][0];
1972
1973         mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
1974         i = 0;
1975         netdev_for_each_mc_addr(ha, dev)
1976                 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
1977
1978         sg_set_buf(&sg[1], mac_data,
1979                    sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
1980
1981         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
1982                                   VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
1983                 dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
1984
1985         kfree(buf);
1986 }
1987
1988 static int virtnet_vlan_rx_add_vid(struct net_device *dev,
1989                                    __be16 proto, u16 vid)
1990 {
1991         struct virtnet_info *vi = netdev_priv(dev);
1992         struct scatterlist sg;
1993
1994         vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
1995         sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
1996
1997         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
1998                                   VIRTIO_NET_CTRL_VLAN_ADD, &sg))
1999                 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
2000         return 0;
2001 }
2002
2003 static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
2004                                     __be16 proto, u16 vid)
2005 {
2006         struct virtnet_info *vi = netdev_priv(dev);
2007         struct scatterlist sg;
2008
2009         vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
2010         sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
2011
2012         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2013                                   VIRTIO_NET_CTRL_VLAN_DEL, &sg))
2014                 dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
2015         return 0;
2016 }
2017
2018 static void virtnet_clean_affinity(struct virtnet_info *vi)
2019 {
2020         int i;
2021
2022         if (vi->affinity_hint_set) {
2023                 for (i = 0; i < vi->max_queue_pairs; i++) {
2024                         virtqueue_set_affinity(vi->rq[i].vq, NULL);
2025                         virtqueue_set_affinity(vi->sq[i].vq, NULL);
2026                 }
2027
2028                 vi->affinity_hint_set = false;
2029         }
2030 }
2031
2032 static void virtnet_set_affinity(struct virtnet_info *vi)
2033 {
2034         cpumask_var_t mask;
2035         int stragglers;
2036         int group_size;
2037         int i, j, cpu;
2038         int num_cpu;
2039         int stride;
2040
2041         if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2042                 virtnet_clean_affinity(vi);
2043                 return;
2044         }
2045
2046         num_cpu = num_online_cpus();
2047         stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
2048         stragglers = num_cpu >= vi->curr_queue_pairs ?
2049                         num_cpu % vi->curr_queue_pairs :
2050                         0;
2051         cpu = cpumask_next(-1, cpu_online_mask);
2052
2053         for (i = 0; i < vi->curr_queue_pairs; i++) {
2054                 group_size = stride + (i < stragglers ? 1 : 0);
2055
2056                 for (j = 0; j < group_size; j++) {
2057                         cpumask_set_cpu(cpu, mask);
2058                         cpu = cpumask_next_wrap(cpu, cpu_online_mask,
2059                                                 nr_cpu_ids, false);
2060                 }
2061                 virtqueue_set_affinity(vi->rq[i].vq, mask);
2062                 virtqueue_set_affinity(vi->sq[i].vq, mask);
2063                 __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
2064                 cpumask_clear(mask);
2065         }
2066
2067         vi->affinity_hint_set = true;
2068         free_cpumask_var(mask);
2069 }
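
/* Worked example (illustrative figures): with 8 online CPUs and 3 queue
 * pairs, stride = max(8 / 3, 1) = 2 and stragglers = 8 % 3 = 2, so queue
 * pairs 0 and 1 each get a group of 3 CPUs and pair 2 gets the remaining 2,
 * covering all 8 CPUs exactly once.
 */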
2070
2071 static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
2072 {
2073         struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2074                                                    node);
2075         virtnet_set_affinity(vi);
2076         return 0;
2077 }
2078
2079 static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
2080 {
2081         struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2082                                                    node_dead);
2083         virtnet_set_affinity(vi);
2084         return 0;
2085 }
2086
2087 static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
2088 {
2089         struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2090                                                    node);
2091
2092         virtnet_clean_affinity(vi);
2093         return 0;
2094 }
2095
2096 static enum cpuhp_state virtionet_online;
2097
2098 static int virtnet_cpu_notif_add(struct virtnet_info *vi)
2099 {
2100         int ret;
2101
2102         ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
2103         if (ret)
2104                 return ret;
2105         ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
2106                                                &vi->node_dead);
2107         if (!ret)
2108                 return ret;
2109         cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
2110         return ret;
2111 }
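
/* Note (added for clarity): in virtnet_cpu_notif_add() the
 * "if (!ret) return ret;" branch is the success path: both hotplug
 * instances registered, so return 0. Otherwise the second registration
 * failed, and the first (online) instance is unwound before the error is
 * propagated.
 */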
2112
2113 static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
2114 {
2115         cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
2116         cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
2117                                             &vi->node_dead);
2118 }
2119
2120 static void virtnet_get_ringparam(struct net_device *dev,
2121                                 struct ethtool_ringparam *ring)
2122 {
2123         struct virtnet_info *vi = netdev_priv(dev);
2124
2125         ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
2126         ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
2127         ring->rx_pending = ring->rx_max_pending;
2128         ring->tx_pending = ring->tx_max_pending;
2129 }
2130
2131
2132 static void virtnet_get_drvinfo(struct net_device *dev,
2133                                 struct ethtool_drvinfo *info)
2134 {
2135         struct virtnet_info *vi = netdev_priv(dev);
2136         struct virtio_device *vdev = vi->vdev;
2137
2138         strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
2139         strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
2140         strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
2141
2142 }
2143
2144 /* TODO: Eliminate out-of-order (OOO) packets during queue switching */
2145 static int virtnet_set_channels(struct net_device *dev,
2146                                 struct ethtool_channels *channels)
2147 {
2148         struct virtnet_info *vi = netdev_priv(dev);
2149         u16 queue_pairs = channels->combined_count;
2150         int err;
2151
2152         /* We don't support separate rx/tx channels.
2153          * We don't allow setting 'other' channels.
2154          */
2155         if (channels->rx_count || channels->tx_count || channels->other_count)
2156                 return -EINVAL;
2157
2158         if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
2159                 return -EINVAL;
2160
2161         /* For now we don't support modifying channels while XDP is loaded.
2162          * Also, when XDP is loaded all RX queues have XDP programs, so we
2163          * only need to check a single RX queue.
2164          */
2165         if (vi->rq[0].xdp_prog)
2166                 return -EINVAL;
2167
2168         get_online_cpus();
2169         err = _virtnet_set_queues(vi, queue_pairs);
2170         if (err) {
2171                 put_online_cpus();
2172                 goto err;
2173         }
2174         virtnet_set_affinity(vi);
2175         put_online_cpus();
2176
2177         netif_set_real_num_tx_queues(dev, queue_pairs);
2178         netif_set_real_num_rx_queues(dev, queue_pairs);
2179  err:
2180         return err;
2181 }
2182
2183 static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2184 {
2185         struct virtnet_info *vi = netdev_priv(dev);
2186         unsigned int i, j;
2187         u8 *p = data;
2188
2189         switch (stringset) {
2190         case ETH_SS_STATS:
2191                 for (i = 0; i < vi->curr_queue_pairs; i++) {
2192                         for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++)
2193                                 ethtool_sprintf(&p, "rx_queue_%u_%s", i,
2194                                                 virtnet_rq_stats_desc[j].desc);
2195                 }
2196
2197                 for (i = 0; i < vi->curr_queue_pairs; i++) {
2198                         for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++)
2199                                 ethtool_sprintf(&p, "tx_queue_%u_%s", i,
2200                                                 virtnet_sq_stats_desc[j].desc);
2201                 }
2202                 break;
2203         }
2204 }
2205
2206 static int virtnet_get_sset_count(struct net_device *dev, int sset)
2207 {
2208         struct virtnet_info *vi = netdev_priv(dev);
2209
2210         switch (sset) {
2211         case ETH_SS_STATS:
2212                 return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
2213                                                VIRTNET_SQ_STATS_LEN);
2214         default:
2215                 return -EOPNOTSUPP;
2216         }
2217 }
2218
2219 static void virtnet_get_ethtool_stats(struct net_device *dev,
2220                                       struct ethtool_stats *stats, u64 *data)
2221 {
2222         struct virtnet_info *vi = netdev_priv(dev);
2223         unsigned int idx = 0, start, i, j;
2224         const u8 *stats_base;
2225         size_t offset;
2226
2227         for (i = 0; i < vi->curr_queue_pairs; i++) {
2228                 struct receive_queue *rq = &vi->rq[i];
2229
2230                 stats_base = (u8 *)&rq->stats;
2231                 do {
2232                         start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
2233                         for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
2234                                 offset = virtnet_rq_stats_desc[j].offset;
2235                                 data[idx + j] = *(u64 *)(stats_base + offset);
2236                         }
2237                 } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
2238                 idx += VIRTNET_RQ_STATS_LEN;
2239         }
2240
2241         for (i = 0; i < vi->curr_queue_pairs; i++) {
2242                 struct send_queue *sq = &vi->sq[i];
2243
2244                 stats_base = (u8 *)&sq->stats;
2245                 do {
2246                         start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
2247                         for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
2248                                 offset = virtnet_sq_stats_desc[j].offset;
2249                                 data[idx + j] = *(u64 *)(stats_base + offset);
2250                         }
2251                 } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
2252                 idx += VIRTNET_SQ_STATS_LEN;
2253         }
2254 }
2255
2256 static void virtnet_get_channels(struct net_device *dev,
2257                                  struct ethtool_channels *channels)
2258 {
2259         struct virtnet_info *vi = netdev_priv(dev);
2260
2261         channels->combined_count = vi->curr_queue_pairs;
2262         channels->max_combined = vi->max_queue_pairs;
2263         channels->max_other = 0;
2264         channels->rx_count = 0;
2265         channels->tx_count = 0;
2266         channels->other_count = 0;
2267 }
2268
2269 static int virtnet_set_link_ksettings(struct net_device *dev,
2270                                       const struct ethtool_link_ksettings *cmd)
2271 {
2272         struct virtnet_info *vi = netdev_priv(dev);
2273
2274         return ethtool_virtdev_set_link_ksettings(dev, cmd,
2275                                                   &vi->speed, &vi->duplex);
2276 }
2277
2278 static int virtnet_get_link_ksettings(struct net_device *dev,
2279                                       struct ethtool_link_ksettings *cmd)
2280 {
2281         struct virtnet_info *vi = netdev_priv(dev);
2282
2283         cmd->base.speed = vi->speed;
2284         cmd->base.duplex = vi->duplex;
2285         cmd->base.port = PORT_OTHER;
2286
2287         return 0;
2288 }
2289
2290 static int virtnet_set_coalesce(struct net_device *dev,
2291                                 struct ethtool_coalesce *ec)
2292 {
2293         struct virtnet_info *vi = netdev_priv(dev);
2294         int i, napi_weight;
2295
2296         if (ec->tx_max_coalesced_frames > 1 ||
2297             ec->rx_max_coalesced_frames != 1)
2298                 return -EINVAL;
2299
2300         napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
2301         if (napi_weight ^ vi->sq[0].napi.weight) {
2302                 if (dev->flags & IFF_UP)
2303                         return -EBUSY;
2304                 for (i = 0; i < vi->max_queue_pairs; i++)
2305                         vi->sq[i].napi.weight = napi_weight;
2306         }
2307
2308         return 0;
2309 }
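
/* Note (added for clarity): there are no real coalescing knobs here;
 * tx_max_coalesced_frames is used as a boolean that toggles TX NAPI
 * (sq->napi.weight) for every queue, and it may only be flipped while the
 * interface is down (-EBUSY otherwise).
 */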
2310
2311 static int virtnet_get_coalesce(struct net_device *dev,
2312                                 struct ethtool_coalesce *ec)
2313 {
2314         struct ethtool_coalesce ec_default = {
2315                 .cmd = ETHTOOL_GCOALESCE,
2316                 .rx_max_coalesced_frames = 1,
2317         };
2318         struct virtnet_info *vi = netdev_priv(dev);
2319
2320         memcpy(ec, &ec_default, sizeof(ec_default));
2321
2322         if (vi->sq[0].napi.weight)
2323                 ec->tx_max_coalesced_frames = 1;
2324
2325         return 0;
2326 }
2327
2328 static void virtnet_init_settings(struct net_device *dev)
2329 {
2330         struct virtnet_info *vi = netdev_priv(dev);
2331
2332         vi->speed = SPEED_UNKNOWN;
2333         vi->duplex = DUPLEX_UNKNOWN;
2334 }
2335
2336 static void virtnet_update_settings(struct virtnet_info *vi)
2337 {
2338         u32 speed;
2339         u8 duplex;
2340
2341         if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
2342                 return;
2343
2344         virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
2345
2346         if (ethtool_validate_speed(speed))
2347                 vi->speed = speed;
2348
2349         virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
2350
2351         if (ethtool_validate_duplex(duplex))
2352                 vi->duplex = duplex;
2353 }
2354
2355 static const struct ethtool_ops virtnet_ethtool_ops = {
2356         .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
2357         .get_drvinfo = virtnet_get_drvinfo,
2358         .get_link = ethtool_op_get_link,
2359         .get_ringparam = virtnet_get_ringparam,
2360         .get_strings = virtnet_get_strings,
2361         .get_sset_count = virtnet_get_sset_count,
2362         .get_ethtool_stats = virtnet_get_ethtool_stats,
2363         .set_channels = virtnet_set_channels,
2364         .get_channels = virtnet_get_channels,
2365         .get_ts_info = ethtool_op_get_ts_info,
2366         .get_link_ksettings = virtnet_get_link_ksettings,
2367         .set_link_ksettings = virtnet_set_link_ksettings,
2368         .set_coalesce = virtnet_set_coalesce,
2369         .get_coalesce = virtnet_get_coalesce,
2370 };
2371
2372 static void virtnet_freeze_down(struct virtio_device *vdev)
2373 {
2374         struct virtnet_info *vi = vdev->priv;
2375         int i;
2376
2377         /* Make sure no work handler is accessing the device */
2378         flush_work(&vi->config_work);
2379
2380         netif_tx_lock_bh(vi->dev);
2381         netif_device_detach(vi->dev);
2382         netif_tx_unlock_bh(vi->dev);
2383         cancel_delayed_work_sync(&vi->refill);
2384
2385         if (netif_running(vi->dev)) {
2386                 for (i = 0; i < vi->max_queue_pairs; i++) {
2387                         napi_disable(&vi->rq[i].napi);
2388                         virtnet_napi_tx_disable(&vi->sq[i].napi);
2389                 }
2390         }
2391 }
2392
2393 static int init_vqs(struct virtnet_info *vi);
2394
2395 static int virtnet_restore_up(struct virtio_device *vdev)
2396 {
2397         struct virtnet_info *vi = vdev->priv;
2398         int err, i;
2399
2400         err = init_vqs(vi);
2401         if (err)
2402                 return err;
2403
2404         virtio_device_ready(vdev);
2405
2406         if (netif_running(vi->dev)) {
2407                 for (i = 0; i < vi->curr_queue_pairs; i++)
2408                         if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
2409                                 schedule_delayed_work(&vi->refill, 0);
2410
2411                 for (i = 0; i < vi->max_queue_pairs; i++) {
2412                         virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
2413                         virtnet_napi_tx_enable(vi, vi->sq[i].vq,
2414                                                &vi->sq[i].napi);
2415                 }
2416         }
2417
2418         netif_tx_lock_bh(vi->dev);
2419         netif_device_attach(vi->dev);
2420         netif_tx_unlock_bh(vi->dev);
2421         return err;
2422 }
2423
2424 static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
2425 {
2426         struct scatterlist sg;
2427         vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
2428
2429         sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
2430
2431         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
2432                                   VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
2433                 dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n");
2434                 return -EINVAL;
2435         }
2436
2437         return 0;
2438 }
2439
2440 static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
2441 {
2442         u64 offloads = 0;
2443
2444         if (!vi->guest_offloads)
2445                 return 0;
2446
2447         return virtnet_set_guest_offloads(vi, offloads);
2448 }
2449
2450 static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
2451 {
2452         u64 offloads = vi->guest_offloads;
2453
2454         if (!vi->guest_offloads)
2455                 return 0;
2456
2457         return virtnet_set_guest_offloads(vi, offloads);
2458 }
2459
2460 static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
2461                            struct netlink_ext_ack *extack)
2462 {
2463         unsigned long max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
2464         struct virtnet_info *vi = netdev_priv(dev);
2465         struct bpf_prog *old_prog;
2466         u16 xdp_qp = 0, curr_qp;
2467         int i, err;
2468
2469         if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
2470             && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
2471                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
2472                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
2473                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
2474                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) {
2475                 NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO/CSUM, disable LRO/CSUM first");
2476                 return -EOPNOTSUPP;
2477         }
2478
2479         if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
2480                 NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
2481                 return -EINVAL;
2482         }
2483
2484         if (dev->mtu > max_sz) {
2485                 NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
2486                 netdev_warn(dev, "XDP requires MTU of at most %lu\n", max_sz);
2487                 return -EINVAL;
2488         }
2489
2490         curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
2491         if (prog)
2492                 xdp_qp = nr_cpu_ids;
2493
2494         /* XDP requires extra queues for XDP_TX */
2495         if (curr_qp + xdp_qp > vi->max_queue_pairs) {
2496                 netdev_warn(dev, "XDP requested %i queues, but the max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
2497                             curr_qp + xdp_qp, vi->max_queue_pairs);
2498                 xdp_qp = 0;
2499         }
2500
2501         old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
2502         if (!prog && !old_prog)
2503                 return 0;
2504
2505         if (prog)
2506                 bpf_prog_add(prog, vi->max_queue_pairs - 1);
2507
2508         /* Make sure NAPI is not using any XDP TX queues for RX. */
2509         if (netif_running(dev)) {
2510                 for (i = 0; i < vi->max_queue_pairs; i++) {
2511                         napi_disable(&vi->rq[i].napi);
2512                         virtnet_napi_tx_disable(&vi->sq[i].napi);
2513                 }
2514         }
2515
2516         if (!prog) {
2517                 for (i = 0; i < vi->max_queue_pairs; i++) {
2518                         rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
2519                         if (i == 0)
2520                                 virtnet_restore_guest_offloads(vi);
2521                 }
2522                 synchronize_net();
2523         }
2524
2525         err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
2526         if (err)
2527                 goto err;
2528         netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
2529         vi->xdp_queue_pairs = xdp_qp;
2530
2531         if (prog) {
2532                 vi->xdp_enabled = true;
2533                 for (i = 0; i < vi->max_queue_pairs; i++) {
2534                         rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
2535                         if (i == 0 && !old_prog)
2536                                 virtnet_clear_guest_offloads(vi);
2537                 }
2538         } else {
2539                 vi->xdp_enabled = false;
2540         }
2541
2542         for (i = 0; i < vi->max_queue_pairs; i++) {
2543                 if (old_prog)
2544                         bpf_prog_put(old_prog);
2545                 if (netif_running(dev)) {
2546                         virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
2547                         virtnet_napi_tx_enable(vi, vi->sq[i].vq,
2548                                                &vi->sq[i].napi);
2549                 }
2550         }
2551
2552         return 0;
2553
2554 err:
2555         if (!prog) {
2556                 virtnet_clear_guest_offloads(vi);
2557                 for (i = 0; i < vi->max_queue_pairs; i++)
2558                         rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
2559         }
2560
2561         if (netif_running(dev)) {
2562                 for (i = 0; i < vi->max_queue_pairs; i++) {
2563                         virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
2564                         virtnet_napi_tx_enable(vi, vi->sq[i].vq,
2565                                                &vi->sq[i].napi);
2566                 }
2567         }
2568         if (prog)
2569                 bpf_prog_sub(prog, vi->max_queue_pairs - 1);
2570         return err;
2571 }
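
/* Note (added for clarity) on the ordering above: NAPI is quiesced before
 * any xdp_prog pointer changes, program swaps happen via
 * rcu_assign_pointer() (with synchronize_net() when detaching), the queue
 * count is grown to reserve xdp_qp TX queues for XDP_TX, and only then is
 * NAPI re-enabled. The err: label walks the same steps back before
 * returning the error.
 */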
2572
2573 static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2574 {
2575         switch (xdp->command) {
2576         case XDP_SETUP_PROG:
2577                 return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
2578         default:
2579                 return -EINVAL;
2580         }
2581 }
2582
2583 static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
2584                                       size_t len)
2585 {
2586         struct virtnet_info *vi = netdev_priv(dev);
2587         int ret;
2588
2589         if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
2590                 return -EOPNOTSUPP;
2591
2592         ret = snprintf(buf, len, "sby");
2593         if (ret >= len)
2594                 return -EOPNOTSUPP;
2595
2596         return 0;
2597 }
2598
2599 static int virtnet_set_features(struct net_device *dev,
2600                                 netdev_features_t features)
2601 {
2602         struct virtnet_info *vi = netdev_priv(dev);
2603         u64 offloads;
2604         int err;
2605
2606         if ((dev->features ^ features) & NETIF_F_LRO) {
2607                 if (vi->xdp_enabled)
2608                         return -EBUSY;
2609
2610                 if (features & NETIF_F_LRO)
2611                         offloads = vi->guest_offloads_capable;
2612                 else
2613                         offloads = vi->guest_offloads_capable &
2614                                    ~GUEST_OFFLOAD_LRO_MASK;
2615
2616                 err = virtnet_set_guest_offloads(vi, offloads);
2617                 if (err)
2618                         return err;
2619                 vi->guest_offloads = offloads;
2620         }
2621
2622         return 0;
2623 }
2624
2625 static const struct net_device_ops virtnet_netdev = {
2626         .ndo_open            = virtnet_open,
2627         .ndo_stop            = virtnet_close,
2628         .ndo_start_xmit      = start_xmit,
2629         .ndo_validate_addr   = eth_validate_addr,
2630         .ndo_set_mac_address = virtnet_set_mac_address,
2631         .ndo_set_rx_mode     = virtnet_set_rx_mode,
2632         .ndo_get_stats64     = virtnet_stats,
2633         .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
2634         .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
2635         .ndo_bpf                = virtnet_xdp,
2636         .ndo_xdp_xmit           = virtnet_xdp_xmit,
2637         .ndo_features_check     = passthru_features_check,
2638         .ndo_get_phys_port_name = virtnet_get_phys_port_name,
2639         .ndo_set_features       = virtnet_set_features,
2640 };
2641
2642 static void virtnet_config_changed_work(struct work_struct *work)
2643 {
2644         struct virtnet_info *vi =
2645                 container_of(work, struct virtnet_info, config_work);
2646         u16 v;
2647
2648         if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
2649                                  struct virtio_net_config, status, &v) < 0)
2650                 return;
2651
2652         if (v & VIRTIO_NET_S_ANNOUNCE) {
2653                 netdev_notify_peers(vi->dev);
2654                 virtnet_ack_link_announce(vi);
2655         }
2656
2657         /* Ignore unknown (future) status bits */
2658         v &= VIRTIO_NET_S_LINK_UP;
2659
2660         if (vi->status == v)
2661                 return;
2662
2663         vi->status = v;
2664
2665         if (vi->status & VIRTIO_NET_S_LINK_UP) {
2666                 virtnet_update_settings(vi);
2667                 netif_carrier_on(vi->dev);
2668                 netif_tx_wake_all_queues(vi->dev);
2669         } else {
2670                 netif_carrier_off(vi->dev);
2671                 netif_tx_stop_all_queues(vi->dev);
2672         }
2673 }
2674
2675 static void virtnet_config_changed(struct virtio_device *vdev)
2676 {
2677         struct virtnet_info *vi = vdev->priv;
2678
2679         schedule_work(&vi->config_work);
2680 }
2681
2682 static void virtnet_free_queues(struct virtnet_info *vi)
2683 {
2684         int i;
2685
2686         for (i = 0; i < vi->max_queue_pairs; i++) {
2687                 __netif_napi_del(&vi->rq[i].napi);
2688                 __netif_napi_del(&vi->sq[i].napi);
2689         }
2690
2691         /* We called __netif_napi_del(), so we must respect an RCU grace
2692          * period before freeing vi->rq.
2693          */
2694         synchronize_net();
2695
2696         kfree(vi->rq);
2697         kfree(vi->sq);
2698         kfree(vi->ctrl);
2699 }
2700
2701 static void _free_receive_bufs(struct virtnet_info *vi)
2702 {
2703         struct bpf_prog *old_prog;
2704         int i;
2705
2706         for (i = 0; i < vi->max_queue_pairs; i++) {
2707                 while (vi->rq[i].pages)
2708                         __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
2709
2710                 old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
2711                 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
2712                 if (old_prog)
2713                         bpf_prog_put(old_prog);
2714         }
2715 }
2716
2717 static void free_receive_bufs(struct virtnet_info *vi)
2718 {
2719         rtnl_lock();
2720         _free_receive_bufs(vi);
2721         rtnl_unlock();
2722 }
2723
2724 static void free_receive_page_frags(struct virtnet_info *vi)
2725 {
2726         int i;
2727         for (i = 0; i < vi->max_queue_pairs; i++)
2728                 if (vi->rq[i].alloc_frag.page)
2729                         put_page(vi->rq[i].alloc_frag.page);
2730 }
2731
2732 static void free_unused_bufs(struct virtnet_info *vi)
2733 {
2734         void *buf;
2735         int i;
2736
2737         for (i = 0; i < vi->max_queue_pairs; i++) {
2738                 struct virtqueue *vq = vi->sq[i].vq;
2739                 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
2740                         if (!is_xdp_frame(buf))
2741                                 dev_kfree_skb(buf);
2742                         else
2743                                 xdp_return_frame(ptr_to_xdp(buf));
2744                 }
2745         }
2746
2747         for (i = 0; i < vi->max_queue_pairs; i++) {
2748                 struct virtqueue *vq = vi->rq[i].vq;
2749
2750                 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
2751                         if (vi->mergeable_rx_bufs) {
2752                                 put_page(virt_to_head_page(buf));
2753                         } else if (vi->big_packets) {
2754                                 give_pages(&vi->rq[i], buf);
2755                         } else {
2756                                 put_page(virt_to_head_page(buf));
2757                         }
2758                 }
2759         }
2760 }
2761
2762 static void virtnet_del_vqs(struct virtnet_info *vi)
2763 {
2764         struct virtio_device *vdev = vi->vdev;
2765
2766         virtnet_clean_affinity(vi);
2767
2768         vdev->config->del_vqs(vdev);
2769
2770         virtnet_free_queues(vi);
2771 }
2772
2773 /* How large should a single buffer be so a queue full of these can fit at
2774  * least one full packet?
2775  * Logic below assumes the mergeable buffer header is used.
2776  */
2777 static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
2778 {
2779         const unsigned int hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2780         unsigned int rq_size = virtqueue_get_vring_size(vq);
2781         unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
2782         unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
2783         unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
2784
2785         return max(max(min_buf_len, hdr_len) - hdr_len,
2786                    (unsigned int)GOOD_PACKET_LEN);
2787 }
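
/* Worked example (illustrative, assuming a 256-entry ring, a 1500-byte
 * max_mtu and no big_packets): hdr_len = 12, so
 * buf_len = 12 + 14 + 4 + 1500 = 1530 and
 * min_buf_len = DIV_ROUND_UP(1530, 256) = 6; the return value is then
 * max(max(6, 12) - 12, 1518) = GOOD_PACKET_LEN.
 */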
2788
2789 static int virtnet_find_vqs(struct virtnet_info *vi)
2790 {
2791         vq_callback_t **callbacks;
2792         struct virtqueue **vqs;
2793         int ret = -ENOMEM;
2794         int i, total_vqs;
2795         const char **names;
2796         bool *ctx;
2797
2798         /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
2799          * up to N-1 more RX/TX queue pairs used in multiqueue mode, followed
2800          * by an optional control vq.
2801          */
2802         total_vqs = vi->max_queue_pairs * 2 +
2803                     virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
2804
2805         /* Allocate space for find_vqs parameters */
2806         vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
2807         if (!vqs)
2808                 goto err_vq;
2809         callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL);
2810         if (!callbacks)
2811                 goto err_callback;
2812         names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL);
2813         if (!names)
2814                 goto err_names;
2815         if (!vi->big_packets || vi->mergeable_rx_bufs) {
2816                 ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
2817                 if (!ctx)
2818                         goto err_ctx;
2819         } else {
2820                 ctx = NULL;
2821         }
2822
2823         /* Parameters for control virtqueue, if any */
2824         if (vi->has_cvq) {
2825                 callbacks[total_vqs - 1] = NULL;
2826                 names[total_vqs - 1] = "control";
2827         }
2828
2829         /* Allocate/initialize parameters for send/receive virtqueues */
2830         for (i = 0; i < vi->max_queue_pairs; i++) {
2831                 callbacks[rxq2vq(i)] = skb_recv_done;
2832                 callbacks[txq2vq(i)] = skb_xmit_done;
2833                 sprintf(vi->rq[i].name, "input.%d", i);
2834                 sprintf(vi->sq[i].name, "output.%d", i);
2835                 names[rxq2vq(i)] = vi->rq[i].name;
2836                 names[txq2vq(i)] = vi->sq[i].name;
2837                 if (ctx)
2838                         ctx[rxq2vq(i)] = true;
2839         }
2840
2841         ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
2842                                          names, ctx, NULL);
2843         if (ret)
2844                 goto err_find;
2845
2846         if (vi->has_cvq) {
2847                 vi->cvq = vqs[total_vqs - 1];
2848                 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
2849                         vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2850         }
2851
2852         for (i = 0; i < vi->max_queue_pairs; i++) {
2853                 vi->rq[i].vq = vqs[rxq2vq(i)];
2854                 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
2855                 vi->sq[i].vq = vqs[txq2vq(i)];
2856         }
2857
2858         /* Success path: ret == 0. Fall through and free the temporary
2859          * find_vqs() parameter arrays; they are no longer needed.
2860          */
2861 err_find:
2862         kfree(ctx);
2863 err_ctx:
2864         kfree(names);
2865 err_names:
2866         kfree(callbacks);
2867 err_callback:
2868         kfree(vqs);
2869 err_vq:
2870         return ret;
2871 }
2872
2873 static int virtnet_alloc_queues(struct virtnet_info *vi)
2874 {
2875         int i;
2876
2877         if (vi->has_cvq) {
2878                 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
2879                 if (!vi->ctrl)
2880                         goto err_ctrl;
2881         } else {
2882                 vi->ctrl = NULL;
2883         }
2884         vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
2885         if (!vi->sq)
2886                 goto err_sq;
2887         vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
2888         if (!vi->rq)
2889                 goto err_rq;
2890
2891         INIT_DELAYED_WORK(&vi->refill, refill_work);
2892         for (i = 0; i < vi->max_queue_pairs; i++) {
2893                 vi->rq[i].pages = NULL;
2894                 netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
2895                                napi_weight);
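                /* With the napi_tx module parameter off, the TX NAPI
                 * instance is registered with a zero weight and
                 * virtnet_napi_tx_enable() then leaves it disabled.
                 */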
2896                 netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx,
2897                                   napi_tx ? napi_weight : 0);
2898
2899                 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
2900                 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
2901                 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
2902
2903                 u64_stats_init(&vi->rq[i].stats.syncp);
2904                 u64_stats_init(&vi->sq[i].stats.syncp);
2905         }
2906
2907         return 0;
2908
2909 err_rq:
2910         kfree(vi->sq);
2911 err_sq:
2912         kfree(vi->ctrl);
2913 err_ctrl:
2914         return -ENOMEM;
2915 }
2916
2917 static int init_vqs(struct virtnet_info *vi)
2918 {
2919         int ret;
2920
2921         /* Allocate send & receive queues */
2922         ret = virtnet_alloc_queues(vi);
2923         if (ret)
2924                 goto err;
2925
2926         ret = virtnet_find_vqs(vi);
2927         if (ret)
2928                 goto err_free;
2929
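        /* Hold the CPU hotplug lock while spreading vq affinity so the set
         * of online CPUs cannot change underneath us.
         */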
2930         get_online_cpus();
2931         virtnet_set_affinity(vi);
2932         put_online_cpus();
2933
2934         return 0;
2935
2936 err_free:
2937         virtnet_free_queues(vi);
2938 err:
2939         return ret;
2940 }
2941
2942 #ifdef CONFIG_SYSFS
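/* Per-RX-queue attribute reporting the refill buffer size currently
 * suggested by the EWMA of received packet lengths.
 */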
2943 static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
2944                 char *buf)
2945 {
2946         struct virtnet_info *vi = netdev_priv(queue->dev);
2947         unsigned int queue_index = get_netdev_rx_queue_index(queue);
2948         unsigned int headroom = virtnet_get_headroom(vi);
2949         unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
2950         struct ewma_pkt_len *avg;
2951
2952         BUG_ON(queue_index >= vi->max_queue_pairs);
2953         avg = &vi->rq[queue_index].mrg_avg_pkt_len;
2954         return sprintf(buf, "%u\n",
2955                        get_mergeable_buf_len(&vi->rq[queue_index], avg,
2956                                        SKB_DATA_ALIGN(headroom + tailroom)));
2957 }
2958
2959 static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
2960         __ATTR_RO(mergeable_rx_buffer_size);
2961
2962 static struct attribute *virtio_net_mrg_rx_attrs[] = {
2963         &mergeable_rx_buffer_size_attribute.attr,
2964         NULL
2965 };
2966
2967 static const struct attribute_group virtio_net_mrg_rx_group = {
2968         .name = "virtio_net",
2969         .attrs = virtio_net_mrg_rx_attrs
2970 };
2971 #endif
2972
2973 static bool virtnet_fail_on_feature(struct virtio_device *vdev,
2974                                     unsigned int fbit,
2975                                     const char *fname, const char *dname)
2976 {
2977         if (!virtio_has_feature(vdev, fbit))
2978                 return false;
2979
2980         dev_err(&vdev->dev, "device advertises feature %s but not %s\n",
2981                 fname, dname);
2982
2983         return true;
2984 }
2985
2986 #define VIRTNET_FAIL_ON(vdev, fbit, dbit)                       \
2987         virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
2988
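/* All of the features checked here are driven through the control vq, so a
 * device offering any of them without VIRTIO_NET_F_CTRL_VQ is broken and we
 * refuse to bind to it.
 */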
2989 static bool virtnet_validate_features(struct virtio_device *vdev)
2990 {
2991         if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
2992             (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
2993                              "VIRTIO_NET_F_CTRL_VQ") ||
2994              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
2995                              "VIRTIO_NET_F_CTRL_VQ") ||
2996              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
2997                              "VIRTIO_NET_F_CTRL_VQ") ||
2998              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
2999              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
3000                              "VIRTIO_NET_F_CTRL_VQ"))) {
3001                 return false;
3002         }
3003
3004         return true;
3005 }
3006
3007 #define MIN_MTU ETH_MIN_MTU
3008 #define MAX_MTU ETH_MAX_MTU
3009
3010 static int virtnet_validate(struct virtio_device *vdev)
3011 {
3012         if (!vdev->config->get) {
3013                 dev_err(&vdev->dev, "%s failure: config access disabled\n",
3014                         __func__);
3015                 return -EINVAL;
3016         }
3017
3018         if (!virtnet_validate_features(vdev))
3019                 return -EINVAL;
3020
3021         if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
3022                 int mtu = virtio_cread16(vdev,
3023                                          offsetof(struct virtio_net_config,
3024                                                   mtu));
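                /* Treat an illegally small MTU as if the feature were
                 * absent instead of failing the probe.
                 */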
3025                 if (mtu < MIN_MTU)
3026                         __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
3027         }
3028
3029         return 0;
3030 }
3031
3032 static int virtnet_probe(struct virtio_device *vdev)
3033 {
3034         int i, err = -ENOMEM;
3035         struct net_device *dev;
3036         struct virtnet_info *vi;
3037         u16 max_queue_pairs;
3038         int mtu;
3039
3040         /* Find out whether the host supports a multiqueue virtio_net device */
3041         err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
3042                                    struct virtio_net_config,
3043                                    max_virtqueue_pairs, &max_queue_pairs);
3044
3045         /* We need at least 2 queues */
3046         if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
3047             max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
3048             !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
3049                 max_queue_pairs = 1;
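        /* Queue pairs beyond the first are enabled with a
         * VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET command, so MQ is unusable
         * without a control vq.
         */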
3050
3051         /* Allocate ourselves a network device with room for our info */
3052         dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
3053         if (!dev)
3054                 return -ENOMEM;
3055
3056         /* Set up network device as normal. */
3057         dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
3058                            IFF_TX_SKB_NO_LINEAR;
3059         dev->netdev_ops = &virtnet_netdev;
3060         dev->features = NETIF_F_HIGHDMA;
3061
3062         dev->ethtool_ops = &virtnet_ethtool_ops;
3063         SET_NETDEV_DEV(dev, &vdev->dev);
3064
3065         /* Do we support "hardware" checksums? */
3066         if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
3067                 /* This opens up the world of extra features. */
3068                 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
3069                 if (csum)
3070                         dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
3071
3072                 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
3073                         dev->hw_features |= NETIF_F_TSO
3074                                 | NETIF_F_TSO_ECN | NETIF_F_TSO6;
3075                 }
3076                 /* Individual feature bits: what can host handle? */
3077                 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
3078                         dev->hw_features |= NETIF_F_TSO;
3079                 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
3080                         dev->hw_features |= NETIF_F_TSO6;
3081                 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
3082                         dev->hw_features |= NETIF_F_TSO_ECN;
3083
3084                 dev->features |= NETIF_F_GSO_ROBUST;
3085
3086                 if (gso)
3087                         dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
3088                 /* (!csum && gso) case will be fixed by register_netdev() */
3089         }
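        /* Bits in hw_features may be toggled by the user via ethtool -K;
         * bits set only in dev->features are fixed for the device's
         * lifetime.
         */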
3090         if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
3091                 dev->features |= NETIF_F_RXCSUM;
3092         if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
3093             virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
3094                 dev->features |= NETIF_F_LRO;
3095         if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
3096                 dev->hw_features |= NETIF_F_LRO;
3097
3098         dev->vlan_features = dev->features;
3099
3100         /* MTU range: 68 - 65535 */
3101         dev->min_mtu = MIN_MTU;
3102         dev->max_mtu = MAX_MTU;
3103
3104         /* Configuration may specify what MAC to use.  Otherwise random. */
3105         if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
3106                 virtio_cread_bytes(vdev,
3107                                    offsetof(struct virtio_net_config, mac),
3108                                    dev->dev_addr, dev->addr_len);
3109         else
3110                 eth_hw_addr_random(dev);
3111
3112         /* Set up our device-specific information */
3113         vi = netdev_priv(dev);
3114         vi->dev = dev;
3115         vi->vdev = vdev;
3116         vdev->priv = vi;
3117
3118         INIT_WORK(&vi->config_work, virtnet_config_changed_work);
3119
3120         /* If we can receive ANY GSO packets, we must allocate large ones. */
3121         if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
3122             virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
3123             virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
3124             virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
3125                 vi->big_packets = true;
3126
3127         if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
3128                 vi->mergeable_rx_bufs = true;
3129
3130         if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
3131             virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
3132                 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
3133         else
3134                 vi->hdr_len = sizeof(struct virtio_net_hdr);
3135
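        /* ANY_LAYOUT and VERSION_1 devices accept the virtio-net header in
         * the same sg element as the data, so xmit can push it into the
         * skb headroom reserved via needed_headroom below.
         */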
3136         if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
3137             virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
3138                 vi->any_header_sg = true;
3139
3140         if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
3141                 vi->has_cvq = true;
3142
3143         if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
3144                 mtu = virtio_cread16(vdev,
3145                                      offsetof(struct virtio_net_config,
3146                                               mtu));
3147                 if (mtu < dev->min_mtu) {
3148                         /* Should never trigger: MTU was previously validated
3149                          * in virtnet_validate.
3150                          */
3151                         dev_err(&vdev->dev,
3152                                 "device MTU appears to have changed, it is now %d < %d\n",
3153                                 mtu, dev->min_mtu);
3154                         err = -EINVAL;
3155                         goto free;
3156                 }
3157
3158                 dev->mtu = mtu;
3159                 dev->max_mtu = mtu;
3160
3161                 /* TODO: size buffers correctly in this case. */
3162                 if (dev->mtu > ETH_DATA_LEN)
3163                         vi->big_packets = true;
3164         }
3165
3166         if (vi->any_header_sg)
3167                 dev->needed_headroom = vi->hdr_len;
3168
3169         /* Enable multiqueue by default, capped at the number of online CPUs */
3170         if (num_online_cpus() >= max_queue_pairs)
3171                 vi->curr_queue_pairs = max_queue_pairs;
3172         else
3173                 vi->curr_queue_pairs = num_online_cpus();
3174         vi->max_queue_pairs = max_queue_pairs;
3175
3176         /* Allocate/initialize the rx/tx queues, and invoke find_vqs */
3177         err = init_vqs(vi);
3178         if (err)
3179                 goto free;
3180
3181 #ifdef CONFIG_SYSFS
3182         if (vi->mergeable_rx_bufs)
3183                 dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
3184 #endif
3185         netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
3186         netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
3187
3188         virtnet_init_settings(dev);
3189
3190         if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
3191                 vi->failover = net_failover_create(vi->dev);
3192                 if (IS_ERR(vi->failover)) {
3193                         err = PTR_ERR(vi->failover);
3194                         goto free_vqs;
3195                 }
3196         }
3197
3198         err = register_netdev(dev);
3199         if (err) {
3200                 pr_debug("virtio_net: registering device failed\n");
3201                 goto free_failover;
3202         }
3203
3204         virtio_device_ready(vdev);
3205
3206         err = virtnet_cpu_notif_add(vi);
3207         if (err) {
3208                 pr_debug("virtio_net: registering cpu notifier failed\n");
3209                 goto free_unregister_netdev;
3210         }
3211
3212         virtnet_set_queues(vi, vi->curr_queue_pairs);
3213
3214         /* Assume link up if device can't report link status,
3215          * otherwise get link status from config. */
3216         netif_carrier_off(dev);
3217         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
3218                 schedule_work(&vi->config_work);
3219         } else {
3220                 vi->status = VIRTIO_NET_S_LINK_UP;
3221                 virtnet_update_settings(vi);
3222                 netif_carrier_on(dev);
3223         }
3224
3225         for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
3226                 if (virtio_has_feature(vi->vdev, guest_offloads[i]))
3227                         set_bit(guest_offloads[i], &vi->guest_offloads);
3228         vi->guest_offloads_capable = vi->guest_offloads;
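        /* Remember every offload the device started out with; feature
         * toggles (e.g. clearing NETIF_F_LRO in virtnet_set_features())
         * restore from this mask.
         */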
3229
3230         pr_debug("virtnet: registered device %s with %d RX and TX vqs\n",
3231                  dev->name, max_queue_pairs);
3232
3233         return 0;
3234
3235 free_unregister_netdev:
3236         vi->vdev->config->reset(vdev);
3237
3238         unregister_netdev(dev);
3239 free_failover:
3240         net_failover_destroy(vi->failover);
3241 free_vqs:
3242         cancel_delayed_work_sync(&vi->refill);
3243         free_receive_page_frags(vi);
3244         virtnet_del_vqs(vi);
3245 free:
3246         free_netdev(dev);
3247         return err;
3248 }
3249
3250 static void remove_vq_common(struct virtnet_info *vi)
3251 {
3252         vi->vdev->config->reset(vi->vdev);
3253
3254         /* Free unused buffers in both send and recv, if any. */
3255         free_unused_bufs(vi);
3256
3257         free_receive_bufs(vi);
3258
3259         free_receive_page_frags(vi);
3260
3261         virtnet_del_vqs(vi);
3262 }
3263
3264 static void virtnet_remove(struct virtio_device *vdev)
3265 {
3266         struct virtnet_info *vi = vdev->priv;
3267
3268         virtnet_cpu_notif_remove(vi);
3269
3270         /* Make sure no work handler is accessing the device. */
3271         flush_work(&vi->config_work);
3272
3273         unregister_netdev(vi->dev);
3274
3275         net_failover_destroy(vi->failover);
3276
3277         remove_vq_common(vi);
3278
3279         free_netdev(vi->dev);
3280 }
3281
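/* Device state is not preserved across freeze/restore, so freeze tears the
 * virtqueues down completely and virtnet_restore_up() rebuilds them.
 */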
3282 static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
3283 {
3284         struct virtnet_info *vi = vdev->priv;
3285
3286         virtnet_cpu_notif_remove(vi);
3287         virtnet_freeze_down(vdev);
3288         remove_vq_common(vi);
3289
3290         return 0;
3291 }
3292
3293 static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
3294 {
3295         struct virtnet_info *vi = vdev->priv;
3296         int err;
3297
3298         err = virtnet_restore_up(vdev);
3299         if (err)
3300                 return err;
3301         virtnet_set_queues(vi, vi->curr_queue_pairs);
3302
3303         err = virtnet_cpu_notif_add(vi);
3304         if (err)
3305                 return err;
3306
3307         return 0;
3308 }
3309
3310 static struct virtio_device_id id_table[] = {
3311         { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
3312         { 0 },
3313 };
3314
3315 #define VIRTNET_FEATURES \
3316         VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
3317         VIRTIO_NET_F_MAC, \
3318         VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
3319         VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
3320         VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
3321         VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
3322         VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
3323         VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
3324         VIRTIO_NET_F_CTRL_MAC_ADDR, \
3325         VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
3326         VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY
3327
3328 static unsigned int features[] = {
3329         VIRTNET_FEATURES,
3330 };
3331
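/* Legacy (pre-virtio-1.0) devices may additionally offer the deprecated
 * VIRTIO_NET_F_GSO and VIRTIO_F_ANY_LAYOUT bits; VERSION_1 devices imply
 * ANY_LAYOUT and negotiate the GSO types individually.
 */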
3332 static unsigned int features_legacy[] = {
3333         VIRTNET_FEATURES,
3334         VIRTIO_NET_F_GSO,
3335         VIRTIO_F_ANY_LAYOUT,
3336 };
3337
3338 static struct virtio_driver virtio_net_driver = {
3339         .feature_table = features,
3340         .feature_table_size = ARRAY_SIZE(features),
3341         .feature_table_legacy = features_legacy,
3342         .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
3343         .driver.name =  KBUILD_MODNAME,
3344         .driver.owner = THIS_MODULE,
3345         .id_table =     id_table,
3346         .validate =     virtnet_validate,
3347         .probe =        virtnet_probe,
3348         .remove =       virtnet_remove,
3349         .config_changed = virtnet_config_changed,
3350 #ifdef CONFIG_PM_SLEEP
3351         .freeze =       virtnet_freeze,
3352         .restore =      virtnet_restore,
3353 #endif
3354 };
3355
3356 static __init int virtio_net_driver_init(void)
3357 {
3358         int ret;
3359
3360         ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
3361                                       virtnet_cpu_online,
3362                                       virtnet_cpu_down_prep);
3363         if (ret < 0)
3364                 goto out;
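        /* CPUHP_AP_ONLINE_DYN hands back a dynamically allocated state
         * number; remember it so the exit paths can remove that state.
         */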
3365         virtionet_online = ret;
3366         ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
3367                                       NULL, virtnet_cpu_dead);
3368         if (ret)
3369                 goto err_dead;
3370
3371         ret = register_virtio_driver(&virtio_net_driver);
3372         if (ret)
3373                 goto err_virtio;
3374         return 0;
3375 err_virtio:
3376         cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
3377 err_dead:
3378         cpuhp_remove_multi_state(virtionet_online);
3379 out:
3380         return ret;
3381 }
3382 module_init(virtio_net_driver_init);
3383
3384 static __exit void virtio_net_driver_exit(void)
3385 {
3386         unregister_virtio_driver(&virtio_net_driver);
3387         cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
3388         cpuhp_remove_multi_state(virtionet_online);
3389 }
3390 module_exit(virtio_net_driver_exit);
3391
3392 MODULE_DEVICE_TABLE(virtio, id_table);
3393 MODULE_DESCRIPTION("Virtio network driver");
3394 MODULE_LICENSE("GPL");